# NOTE(review): the lines below are residue from the hosting platform's web
# file viewer, captured when this file was scraped; they are not program code.
# Preserved for provenance: Spaces status "Runtime error"; file size 5,766
# bytes; commit hashes c2712f2 / e9899bd / 551257e; line-number gutter 1-142.
import joblib
import pandas as pd
import numpy as np
from flask import Flask, request, jsonify
from flask_cors import CORS
import os
import logging
import threading
import time
from tqdm import tqdm
from tenacity import retry, wait_fixed, stop_after_attempt
from sklearn.preprocessing import StandardScaler, LabelEncoder
from sklearn.impute import SimpleImputer
from sklearn.model_selection import StratifiedKFold
from sklearn.metrics import f1_score, recall_score
from imblearn.over_sampling import SMOTE
from imblearn.under_sampling import RandomUnderSampler
from imblearn.pipeline import Pipeline
from xgboost import XGBClassifier
from lightgbm import LGBMClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.linear_model import LogisticRegression
from sklearn.svm import SVC
# Initialize the Flask application
app = Flask(__name__)
CORS(app)  # allow cross-origin requests from browser frontends

# Paths to persisted model artifacts and data files
PRIORITY_MODEL_PATH = 'priority_model.pkl'
SERVICE_MODEL_PATH = 'service_model.pkl'
DATASET_PATH = "my_datasheet_80000.csv"
NEW_DATA_FILE = 'new_data.csv'  # incoming predictions are appended here (see predict())
MIN_NEW_SAMPLES_FOR_RETRAIN = 100  # presumably used by retraining logic elsewhere in the file — not visible here

# Global model state, populated by load_models()
priority_model = None
service_model = None
priority_scaler = None
service_scaler = None
priority_imputer = None
service_imputer = None
label_encoder_service = LabelEncoder()  # replaced by the persisted encoder when load_models() finds one
model_lock = threading.Lock()  # serializes access to the models during prediction

# Initialize the logger
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
# Fonction pour charger les modèles
def load_models():
    """Load the persisted model artifacts from disk into the module globals.

    Each artifact is loaded only when its file exists; a missing file is
    logged and the previous value of the corresponding global is kept.

    Bug fixed: the original guarded only the two model files but loaded the
    five preprocessing artifacts unconditionally, so any missing .pkl raised
    FileNotFoundError at import time (load_models() runs at module load) and
    crashed the app before Flask could even start.
    """
    global priority_model, service_model, priority_scaler, service_scaler, \
        priority_imputer, service_imputer, label_encoder_service

    def _load_if_exists(path, fallback):
        # Return the unpickled artifact, or the fallback (with a warning) if absent.
        if os.path.exists(path):
            return joblib.load(path)
        logger.warning("Model artifact not found, keeping previous value: %s", path)
        return fallback

    priority_model = _load_if_exists(PRIORITY_MODEL_PATH, priority_model)
    service_model = _load_if_exists(SERVICE_MODEL_PATH, service_model)
    priority_scaler = _load_if_exists('priority_scaler.pkl', priority_scaler)
    service_scaler = _load_if_exists('service_scaler.pkl', service_scaler)
    priority_imputer = _load_if_exists('priority_imputer.pkl', priority_imputer)
    service_imputer = _load_if_exists('service_imputer.pkl', service_imputer)
    label_encoder_service = _load_if_exists('label_encoder_service.pkl', label_encoder_service)
# Load the models once at startup (module import time)
load_models()
# Fonctions et routes Flask
# Flask routes
@app.route('/predict', methods=['POST'])
def predict():
    """Predict the triage priority and next service for one patient.

    Expects a JSON body with the fields listed in ``required_fields``.
    Returns a JSON object with ``priority`` (int), ``service_suivant``
    (service label), and the model confidence for each, or an ``error``
    payload with HTTP 400 (missing fields) / 500 (any other failure).

    Side effect: the enriched input row, with the final predictions, is
    appended to NEW_DATA_FILE (presumably consumed later for retraining —
    MIN_NEW_SAMPLES_FOR_RETRAIN suggests so, but that logic is not visible
    here).

    NOTE(review): original indentation was lost in transit; the statement
    grouping below (in particular the extent of the ``with model_lock``
    block) is a reconstruction — confirm against the original file.
    """
    global priority_model, service_model
    # Lazily (re)load in case the artifacts were missing at startup.
    if priority_model is None or service_model is None:
        load_models()
    try:
        data = request.get_json()
        required_fields = ['age', 'sexe', 'enceinte', 'spo2', 'freq_resp', 'pouls', 'ecg', 'pa', 'temperature', 'imc']
        missing_fields = [field for field in required_fields if field not in data]
        if missing_fields:
            return jsonify({'error': f'Missing fields: {", ".join(missing_fields)}'}), 400
        # Map the request payload onto the feature names the models were trained on.
        input_data = {
            'Age': float(data['age']),
            # Binary encoding: 0 = masculine, 1 = anything else.
            'Sexe': 0 if data['sexe'].lower() == 'masculin' else 1,
            # NOTE(review): bool() on a string is truthiness, so the JSON
            # string "false" maps to 1 here — confirm clients send a real
            # boolean, not a string.
            'Enceinte': 1 if bool(data['enceinte']) else 0,
            'SpO2': float(data['spo2']),
            'Frquce_Rprtr(rpm)': float(data['freq_resp']),  # respiratory rate; key name matches the training CSV
            'Pouls': float(data['pouls']),
            'ECG': 0 if data['ecg'].lower() == 'normal' else 1,
            'PA': float(data['pa']),
            'Temperature': float(data['temperature']),
            'IMC': float(data['imc']),
        }
        input_df = pd.DataFrame([input_data])
        # enhanced_features / compute_service_and_priority are defined
        # elsewhere in this file (not visible in this chunk); presumably
        # feature engineering and the rule-based fallback, respectively.
        input_df = enhanced_features(input_df)
        suggested_service, suggested_priority = compute_service_and_priority(input_df.iloc[0])
        input_df['Suggested_Priority'] = suggested_priority
        with model_lock:
            # Priority prediction: impute -> scale -> predict_proba.
            priority_input = input_df[PRIORITY_FEATURES]
            priority_imputed = priority_imputer.transform(priority_input)
            priority_scaled = priority_scaler.transform(priority_imputed)
            priority_probs = priority_model.predict_proba(priority_scaled)[0]
            # NOTE(review): assumes the model's classes are 1..N in column
            # order (argmax index + 1 == priority label) — verify against
            # priority_model.classes_.
            priority_pred = np.argmax(priority_probs) + 1
            priority_conf = float(max(priority_probs))
            # Service prediction: same pipeline, label decoded via the encoder.
            service_input = input_df[SERVICE_FEATURES]
            service_imputed = service_imputer.transform(service_input)
            service_scaled = service_scaler.transform(service_imputed)
            service_probs = service_model.predict_proba(service_scaled)[0]
            service_pred_idx = np.argmax(service_probs)
            service_pred = label_encoder_service.inverse_transform([service_pred_idx])[0]
            service_conf = float(max(service_probs))
            # Fallback to rule-based logic if confidence is low or critical conditions apply.
            # Critical_Signs is presumably added by enhanced_features — not visible here.
            if priority_conf < 0.7 or input_df['Critical_Signs'][0] == 1:
                priority_pred = suggested_priority
            if service_conf < 0.7 or input_df['Enceinte'][0] == 1:
                # Pregnant patients are always routed to obstetrics.
                service_pred = suggested_service if input_df['Enceinte'][0] == 0 else 'Gynécologie/Obstétrique'
            # Persist the enriched row with the final predictions for future retraining.
            input_df['Priorite'] = priority_pred
            input_df['Service_Suivant'] = service_pred
            if not os.path.exists(NEW_DATA_FILE):
                input_df.to_csv(NEW_DATA_FILE, index=False)
            else:
                # Append without re-writing the header.
                input_df.to_csv(NEW_DATA_FILE, mode='a', header=False, index=False)
        logger.info(f"Predicted: service={service_pred}, priority={priority_pred}, service_conf={service_conf}, priority_conf={priority_conf}")
        return jsonify({
            'priority': int(priority_pred),
            'service_suivant': service_pred,
            'priority_confidence': priority_conf,
            'service_confidence': service_conf
        })
    except Exception as e:
        # Broad catch at the route boundary: report the failure as HTTP 500.
        logger.error(f"Prediction error: {str(e)}")
        return jsonify({'error': str(e)}), 500
if __name__ == '__main__':
    # Start the built-in development server, reachable from any interface.
    # (A production deployment should sit behind a proper WSGI server.)
    app.run(host='0.0.0.0', port=5000, debug=False)
# (trailing scrape artifact: "|")