add quantized model
- app/main.py +46 -7
- app/model.py +237 -34
- app/utils.py +1 -1
- model/efficientnetv2m_float16.tflite +3 -0
app/main.py
CHANGED

@@ -5,7 +5,7 @@ from pydantic import BaseModel
 from typing import Union
 import base64
 
-from app.model import load_model, predict_with_model
+from app.model import load_model, predict_with_model,get_heatmap
 import os
 import threading
 import time
@@ -32,18 +32,16 @@ def startup():
 
 
 class ImagePayload(BaseModel):
-    image: str
-
+    image: str
+    predicted_class_index: int
 
 @app.post("/predict")
 async def predict(request: Request,
                   file: UploadFile = File(None),
                   payload: Union[ImagePayload, None] = None,
-                  show_heatmap: bool = Query(False, description="Afficher la heatmap"),
                   ):
 
     logger.info("🔁 Requête reçue")
-    logger.info(f"✅ Show heatmap : {show_heatmap}")
 
     try:
         # Cas 1 : multipart avec fichier
@@ -61,12 +59,11 @@ async def predict(request: Request,
             raise HTTPException(status_code=400, detail="Format de requête non supporté.")
 
         # Appel de ta logique de prédiction
-        logger.debug("🔍 Appel du vote multi-modèles...")
         models = load_model()
         if not models:
             raise HTTPException(status_code=500, detail="Aucun modèle chargé.")
         model_config = models[0]
-        prediction = predict_with_model(model_config, image_bytes
+        prediction = predict_with_model(model_config, image_bytes)
 
         # Pour l’instant : réponse simulée
         return prediction
@@ -76,6 +73,48 @@ async def predict(request: Request,
         raise HTTPException(status_code=500, detail=str(e))
 
 
+
+
+
+@app.post("/heatmap")
+async def predict_heatmap(
+    request: Request,
+    payload: Union[ImagePayload, None] = None,
+    file: UploadFile = File(None),
+    predicted_class_index: int = Query(None)
+):
+    logger.info("🔁 Requête reçue pour heatmap")
+
+    try:
+        if file is not None:
+            image_bytes = await file.read()
+            logger.debug(f"✅ Image reçue via multipart : {file.filename} — {len(image_bytes)} octets")
+            if predicted_class_index is None:
+                raise HTTPException(status_code=400, detail="predicted_class_index requis en query avec fichier multipart")
+
+        elif payload is not None:
+            image_bytes = base64.b64decode(payload.image)
+            predicted_class_index = payload.predicted_class_index
+            logger.debug(f"✅ Image reçue en JSON base64 : {len(image_bytes)} octets, class={predicted_class_index}")
+
+        else:
+            raise HTTPException(status_code=400, detail="Aucune image reçue")
+
+        models = load_model()
+        if not models:
+            raise HTTPException(status_code=500, detail="Aucun modèle chargé.")
+        model_config = models[0]
+
+        heatmap = get_heatmap(model_config, image_bytes, predicted_class_index)
+
+        return {"heatmap": heatmap}
+
+    except Exception as e:
+        logger.error("❌ Erreur heatmap", exc_info=True)
+        raise HTTPException(status_code=500, detail=str(e))
+
+
+
 @app.get("/health")
 def health_check():
     return {
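Usage note (not part of the commit): a minimal client sketch for the new /heatmap endpoint, covering both input modes the handler accepts: multipart upload with predicted_class_index as a query parameter, and JSON with a base64-encoded image matching the extended ImagePayload model. The base URL, file name, and class index below are placeholder assumptions.

import base64
import requests

BASE_URL = "http://localhost:7860"  # hypothetical host for the Space

# Mode 1: multipart file + class index in the query string
with open("sample.jpg", "rb") as f:
    r = requests.post(
        f"{BASE_URL}/heatmap",
        params={"predicted_class_index": 3},
        files={"file": ("sample.jpg", f, "image/jpeg")},
    )
print(r.json()["heatmap"])

# Mode 2: JSON body with base64 image and class index, per ImagePayload
with open("sample.jpg", "rb") as f:
    payload = {
        "image": base64.b64encode(f.read()).decode(),
        "predicted_class_index": 3,
    }
r = requests.post(f"{BASE_URL}/heatmap", json=payload)
print(r.json()["heatmap"])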
app/model.py
CHANGED

@@ -6,7 +6,6 @@ import logging
 import numpy as np
 from PIL import Image
 from keras.applications.efficientnet_v2 import preprocess_input as effnet_preprocess
-from keras.applications.resnet_v2 import preprocess_input as resnet_preprocess
 import io
 from tf_keras_vis.gradcam import Gradcam,GradcamPlusPlus
 from tf_keras_vis.utils import normalize
@@ -31,10 +30,179 @@ logger = logging.getLogger(__name__)
 confidence_threshold=0.55
 entropy_threshold=2
 
+
+
+
+class TFLiteDynamicModel:
+    def __init__(self, tflite_path, img_size=224):
+        logger.info(f"🚀 Chargement du modèle TFLite depuis : {tflite_path}")
+        self.img_size = img_size
+        self.interpreter = tf.lite.Interpreter(model_path=tflite_path)
+        self.interpreter.allocate_tensors()
+
+        input_details = self.interpreter.get_input_details()
+        output_details = self.interpreter.get_output_details()
+
+        self.input_index = input_details[0]['index']
+        self.input_dtype = input_details[0]['dtype']
+        self.input_scale, self.input_zero_point = input_details[0]['quantization']
+        self.output_index = output_details[0]['index']
+
+        logger.info(f"🔍 Input tensor index : {self.input_index}, dtype : {self.input_dtype}, scale : {self.input_scale}, zero_point : {self.input_zero_point}")
+        logger.info(f"🔍 Output tensor index : {self.output_index}")
+
+
+    def preprocess(self, pil_image):
+        logger.info(f"🎨 Prétraitement image, redimension à {self.img_size}x{self.img_size}")
+        img = pil_image.resize((self.img_size, self.img_size))
+        img = np.array(img)
+
+        # 📸 Gestion des images grayscale ou RGBA
+        if img.ndim == 2:  # Grayscale -> RGB
+            logger.debug("⚪ Image grayscale détectée, conversion en RGB")
+            img = np.stack([img] * 3, axis=-1)
+        elif img.shape[-1] == 4:  # RGBA -> RGB
+            logger.debug("🖼️ Image RGBA détectée, suppression canal alpha")
+            img = img[..., :3]
+
+        if self.input_dtype in [np.uint8, np.int8]:
+            logger.info("🗜️ Modèle quantifié PTQ détecté (entrée int8 ou uint8)")
+
+            # Pas de division par 255 ici ! On quantifie directement selon l'échelle et le zéro-point
+            img = img.astype(np.float32)
+            img = img / self.input_scale + self.input_zero_point
+
+            # On clip selon le type
+            if self.input_dtype == np.uint8:
+                img = np.clip(img, 0, 255)
+            else:  # np.int8
+                img = np.clip(img, -128, 127)
+
+            img = img.astype(self.input_dtype)
+
+        else:
+            logger.info("🌊 Modèle dynamique ou float32 détecté (entrée float32 normalisée)")
+            img = img.astype(self.input_dtype)  # Normalisation classique
+
+        input_data = np.expand_dims(img, axis=0)
+        logger.info(f"✅ Image prétraitée avec forme {input_data.shape} et dtype {input_data.dtype}")
+        return input_data
+
+
+    def preprocess_old(self, pil_image):
+        logger.info(f"🎨 Prétraitement image, redimension à {self.img_size}x{self.img_size}")
+        img = pil_image.resize((self.img_size, self.img_size))
+        img = np.array(img).astype(np.float32)
+
+        if img.ndim == 2:  # grayscale -> RGB
+            logger.debug("⚪ Image grayscale détectée, conversion en RGB")
+            img = np.stack([img]*3, axis=-1)
+        elif img.shape[-1] == 4:  # RGBA -> RGB
+            logger.debug("🖼️ Image RGBA détectée, suppression canal alpha")
+            img = img[..., :3]
+
+        if self.input_dtype in [np.uint8, np.int8]:
+            if self.input_scale > 0:
+                logger.debug(f"⚙️ Application quantification dynamique avec scale {self.input_scale} et zero_point {self.input_zero_point}")
+                img = img / 255.0
+                img = img / self.input_scale + self.input_zero_point
+                img = np.clip(img, 0, 255 if self.input_dtype == np.uint8 else 127)
+                img = img.astype(self.input_dtype)
+            else:
+                img = img.astype(self.input_dtype)
+
+        input_data = np.expand_dims(img, axis=0)
+        logger.info(f"✅ Image prétraitée avec forme {input_data.shape} et dtype {input_data.dtype}")
+        return input_data
+
+
+
+    def predict_dyna(self, pil_image):
+        logger.info("⚡ Début de prédiction (modèle dynamique ou float32)")
+
+        # Prétraitement
+        logger.info("🔄 Prétraitement de l'image en cours")
+        input_data = self.preprocess(pil_image)
+        logger.debug(f"✅ Image prétraitée - Shape : {input_data.shape} - Dtype : {input_data.dtype}")
+
+        # Injection des données dans le modèle
+        logger.info("📥 Injection des données dans le modèle")
+        self.interpreter.set_tensor(self.input_index, input_data)
+
+        # Invocation du modèle
+        logger.info("🚀 Exécution du modèle TFLite")
+        self.interpreter.invoke()
+
+        # Récupération de la sortie
+        logger.info("📤 Récupération des résultats bruts")
+        output_data = self.interpreter.get_tensor(self.output_index)
+        logger.debug(f"✅ Logits récupérés - Shape : {output_data.shape} - Dtype : {output_data.dtype}")
+
+        # Calcul des probabilités
+        logger.info("🧮 Calcul des probabilités")
+        probas=output_data[0]
+        logger.debug(f"✅ Probabilités : {probas}")
+
+        logger.info("🎯 Prédiction terminée")
+
+        return probas
+
+
+    def predict_ptq(self, pil_image):
+        logger.info("⚡ Début de prédiction")
+
+        # Prétraitement
+        logger.info("🔄 Prétraitement de l'image en cours")
+        input_data = self.preprocess(pil_image)
+        logger.debug(f"✅ Image prétraitée - Shape : {input_data.shape} - Dtype : {input_data.dtype}")
+
+        # Injection des données dans le modèle
+        logger.info("📥 Injection des données dans le modèle")
+        self.interpreter.set_tensor(self.input_index, input_data)
+
+        # Invocation du modèle
+        logger.info("🚀 Exécution du modèle TFLite")
+        self.interpreter.invoke()
+
+        # Récupération de la sortie
+        logger.info("📤 Récupération des résultats bruts")
+        output_details = self.interpreter.get_output_details()[0]
+        output_data = self.interpreter.get_tensor(output_details['index'])
+        logger.debug(f"✅ Logits quantifiés récupérés - Shape : {output_data.shape} - Dtype : {output_data.dtype}")
+
+        # Paramètres de quantification
+        output_scale, output_zero_point = output_details['quantization']
+        logger.debug(f"ℹ️ Paramètres de quantification - Scale: {output_scale}, Zero Point: {output_zero_point}")
+
+        # Déquantification
+        logger.info("🔓 Déquantification des logits")
+        logits = (output_data.astype(np.float32) - output_zero_point) * output_scale
+        logger.debug(f"✅ Logits déquantifiés : {logits}")
+
+        # Calcul des probabilités
+        logger.info("🧮 Calcul des probabilités avec softmax")
+        probas = logits[0]
+        logger.debug(f"✅ Probabilités : {probas}")
+
+        logger.info("🎯 Prédiction terminée")
+
+        return probas
+
+    def predict (self, pil_image):
+        if self.input_dtype in [np.uint8, np.int8]:
+            logger.info("🗜️ Modèle quantifié PTQ détecté")
+            return self.predict_ptq(pil_image)
+        else:
+            logger.info("🌊 Modèle dynamique ou float32 détecté")
+            return self.predict_dyna(pil_image)
+
+
 class ModelStruct(TypedDict):
     model_name: str
     model: tf.keras.Model
     gradcam_model:tf.keras.Model
+    fast_model:TFLiteDynamicModel
     preprocess_input: Callable[[np.ndarray], Any]
     target_size: tuple[int, int]
     last_conv_layer:str
@@ -47,11 +215,13 @@ def load_model() -> list[ModelStruct]:
     if _model_cache is None:
         print("📦 Chargement du modèle EfficientNetV2M...")
         model = tf.keras.models.load_model("model/best_efficientnetv2m_gradcam.keras", compile=False)
+        fast_model=TFLiteDynamicModel("model/efficientnetv2m_float16.tflite", img_size=480)
 
         _model_cache = [{
             "model_name": "EfficientNetV2M",
             "model": model,
             "gradcam_model": model,
+            "fast_model":fast_model,
             "preprocess_input": effnet_preprocess,
             "target_size": (480, 480),
             "last_conv_layer": "block7a_expand_conv",
@@ -111,6 +281,7 @@ def compute_gradcam(model, image_array, class_index=None, layer_name=None,gradca
     return cam
 
 
+
 def preprocess_image(image_bytes, target_size, preprocess_input):
     try:
         logger.info("📤 Lecture des bytes et conversion en image PIL")
@@ -151,52 +322,84 @@ def compute_entropy_safe(probas):
     return entropy
 
 
-def 
+def get_heatmap(config, image_bytes: bytes,predicted_class_index):
+    result={}
+    try:
+        _,raw_input = preprocess_image(image_bytes,config["target_size"],config["preprocess_input"])
+        logger.info("✅ Début de la génération de la heatmap")
+        start_time = time.time()
+
+        # Vérification des entrées
+        logger.info(f"🖼️ Image d'entrée shape: {raw_input.shape}")
+        logger.info(f"🎯 Index de classe prédite: {predicted_class_index}")
+        logger.info(f"🛠️ Dernière couche utilisée: {config['last_conv_layer']}")
+
+        # Calcul de la heatmap
+        heatmap = compute_gradcam(config["gradcam_model"], raw_input, class_index=predicted_class_index, layer_name=config["last_conv_layer"],gradcam_type=config["gradcam_type"])
+
+        elapsed_time = time.time() - start_time
+        logger.info(f"✅ Heatmap générée en {elapsed_time:.2f} secondes")
+
+        # Conversion en liste pour le JSON
+        result["heatmap"] = heatmap.tolist()
+
+    except Exception as e:
+        logger.error(f"❌ Erreur lors de la génération de la heatmap: {e}")
+        result["heatmap"] = []
+    return result
+
+def predict_with_model(config, image_bytes: bytes):
 
-    input_array,raw_input = preprocess_image(image_bytes,config["target_size"],config["preprocess_input"])
+    #input_array,raw_input = preprocess_image(image_bytes,config["target_size"],config["preprocess_input"])
 
+    try:
+        logger.info("📤 Lecture des bytes et conversion en image PIL")
+        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+    except Exception as e:
+        logger.exception("❌ Erreur lors de l'ouverture de l'image")
+        raise ValueError("Impossible de décoder l'image") from e
     logger.info("🤖 Lancement de la prédiction avec le modèle")
-    preds = config["
-    logger.
+    preds = config["fast_model"].predict(image)
+    logger.info(f"📈 Prédictions brutes : {preds.tolist()}")
 
-    predicted_class_index = int(np.argmax(preds
-    confidence = float(preds
+    predicted_class_index = int(np.argmax(preds))
+    confidence = float(np.max(preds))
     entropy=float(compute_entropy_safe(preds))
-
-    logger.info(f"✅ Prédiction : classe={predicted_class_index}, confiance={confidence:.4f},entropy={entropy:.4f},is_uncertain_model={is_uncertain_model}")
+    logger.info(f"✅ Prédiction : classe={predicted_class_index}, confiance={confidence:.4f},entropy={entropy:.4f}")
 
-
-        "preds": preds
+    return {
+        "preds": preds.tolist(),
         "predicted_class": predicted_class_index,
         "confidence": confidence,
-        "entropy":entropy
-        "is_uncertain_model":is_uncertain_model
+        "entropy":entropy
     }
-    if show_heatmap and not is_uncertain_model:
-        try:
-            logger.info("✅ Début de la génération de la heatmap")
-            start_time = time.time()
 
-            # Vérification des entrées
-            logger.info(f"🖼️ Image d'entrée shape: {raw_input.shape}")
-            logger.info(f"🎯 Index de classe prédite: {predicted_class_index}")
-            logger.info(f"🛠️ Dernière couche utilisée: {config['last_conv_layer']}")
 
-
-
 
-
-
 
-            # Conversion en liste pour le JSON
-            result["heatmap"] = heatmap.tolist()
 
-
-
-
-
-
-            result["heatmap"] = []
 
 
-    return result
+def predict_with_model_old(config, image_bytes: bytes):
+
+    #input_array,raw_input = preprocess_image(image_bytes,config["target_size"],config["preprocess_input"])
 
+    try:
+        logger.info("📤 Lecture des bytes et conversion en image PIL")
+        image = Image.open(io.BytesIO(image_bytes)).convert("RGB")
+    except Exception as e:
+        logger.exception("❌ Erreur lors de l'ouverture de l'image")
+        raise ValueError("Impossible de décoder l'image") from e
+    logger.info("🤖 Lancement de la prédiction avec le modèle")
+    preds = config["fast_model"].predict(image)
+    logger.info(f"📈 Prédictions brutes : {preds[0].tolist()}")
 
 
+
+    predicted_class_index = int(np.argmax(preds[0]))
+    confidence = float(preds[0][predicted_class_index])
+    entropy=float(compute_entropy_safe(preds))
+    logger.info(f"✅ Prédiction : classe={predicted_class_index}, confiance={confidence:.4f},entropy={entropy:.4f}")
 
+    return {
+        "preds": preds[0].tolist(),
+        "predicted_class": predicted_class_index,
+        "confidence": confidence,
+        "entropy":entropy
+    }
 
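Side note on the quantization math in TFLiteDynamicModel above: preprocess() applies the standard affine mapping q = x / scale + zero_point for int8/uint8 inputs (deliberately skipping the /255 normalization), and predict_ptq() inverts it with x = (q - zero_point) * scale on the output tensor. A standalone NumPy sketch of that round trip; the scale and zero_point values are made up for illustration, the real ones come from interpreter.get_input_details()[0]['quantization']:

import numpy as np

scale, zero_point = 0.007843, -128  # hypothetical int8 quantization params

# Quantize, as in preprocess(): straight to the int8 grid, no /255 step
x = np.array([0.0, 0.5, 1.0], dtype=np.float32)
q = np.clip(x / scale + zero_point, -128, 127).astype(np.int8)

# Dequantize, as in predict_ptq(): back to float32
x_back = (q.astype(np.float32) - zero_point) * scale
print(q)       # [-128  -64    0]
print(x_back)  # round-trip error stays within one quantization step (~scale)

predict() then simply routes on the input dtype: int8/uint8 tensors go through predict_ptq(), anything else through predict_dyna().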
app/utils.py
CHANGED

@@ -30,7 +30,7 @@ def register_with_orchestrator():
     logger.info(f"📡 Tentative d'enregistrement de {MODEL_NAME} à l'orchestrateur...")
     response = requests.post(
         f"{ORCHESTRATOR_URL}/register_model",
-        json={"model_name": MODEL_NAME, "model_type": MODEL_TYPE,"url": f"{OWN_URL}
+        json={"model_name": MODEL_NAME, "model_type": MODEL_TYPE,"url": f"{OWN_URL}"}
     )
     if response.status_code == 200:
         logger.info("✅ Modèle enregistré avec succès")
model/efficientnetv2m_float16.tflite
ADDED

@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9f42e7fa1fa94a3115df85c6721b1ebef563feca717fba1922ba632dcc658794
+size 106340288
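One practical note on the added weights file: it is stored as a Git LFS pointer, so a clone without LFS support leaves only the short pointer text on disk. A small sanity check against the size declared above, as a sketch, before handing the path to TFLiteDynamicModel:

import os

path = "model/efficientnetv2m_float16.tflite"
expected_size = 106340288  # from the LFS pointer above

actual = os.path.getsize(path)
if actual != expected_size:
    raise RuntimeError(
        f"{path} is {actual} bytes, expected {expected_size}; "
        "run `git lfs pull` to fetch the real model file."
    )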