Anwaree commited on
Commit
483e6f5
·
verified ·
1 Parent(s): 1f3beb7

train_spam_model.py

Browse files

# -----------------------------
# 1️⃣ Import des librairies
# -----------------------------
import pandas as pd
import re
import joblib
import nltk
from nltk.corpus import stopwords
from nltk.stem import PorterStemmer
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import classification_report, confusion_matrix, accuracy_score, roc_auc_score
from imblearn.over_sampling import SMOTE

# Télécharger stopwords si nécessaire
nltk.download('stopwords')

# -----------------------------
# 2️⃣ Prétraitement des messages
# -----------------------------
# Text normalisation: lowercase, strip URLs / e-mails / phone numbers /
# digits, keep spam-indicative punctuation, then stop-word-filter and stem.
stop_words = set(stopwords.words('english'))
stemmer = PorterStemmer()

# Compiled once at import time so each call skips the pattern-cache lookup.
_URL_RE = re.compile(r'http\S+|www\S+')
_EMAIL_RE = re.compile(r'\S+@\S+')
_PHONE_RE = re.compile(r'\+?\d[\d -]{8,}\d')
_DIGITS_RE = re.compile(r'\d+')
_KEEP_RE = re.compile(r'[^a-z\s!/+>]')


def preprocess_message(text):
    """Return a cleaned, stemmed version of *text* suitable for TF-IDF.

    NaN / missing values map to the empty string so a pandas column can
    be fed through ``Series.apply`` directly.
    """
    if pd.isna(text):
        return ""
    text = text.lower()
    text = _URL_RE.sub('', text)     # remove URLs
    text = _EMAIL_RE.sub('', text)   # remove e-mail addresses
    text = _PHONE_RE.sub('', text)   # remove phone-number-like runs
    text = _DIGITS_RE.sub('', text)  # remove remaining digits
    # Keep only letters, whitespace, and punctuation common in spam (!/+>).
    text = _KEEP_RE.sub('', text)
    return " ".join(
        stemmer.stem(word) for word in text.split() if word not in stop_words
    )

# -----------------------------
# 3️⃣ Charger les données
# -----------------------------
# Expected columns: "Message" (raw text) and "Category" ('spam'/'ham').
data = pd.read_csv("data.csv")
data['cleaned'] = data['Message'].apply(preprocess_message)

X, y = data['cleaned'], data['Category']

# Stratified 80/20 split keeps the spam/ham ratio identical in both halves.
X_train, X_test, y_train, y_test = train_test_split(
    X,
    y,
    test_size=0.2,
    random_state=42,
    stratify=y,
)

# -----------------------------
# 4️⃣ Vectorisation TF-IDF
# -----------------------------
# Unigrams + bigrams; the custom token pattern also emits the single
# punctuation marks (!, /, +, >) that the preprocessing deliberately kept.
tfidf = TfidfVectorizer(
    max_features=5000,
    min_df=2,
    max_df=0.95,
    ngram_range=(1, 2),
    token_pattern=r'(?u)\b\w+\b|[!/+>]',
)

# Fit on the training split only; the test split is merely transformed.
X_train_tfidf = tfidf.fit_transform(X_train)
X_test_tfidf = tfidf.transform(X_test)

# -----------------------------
# 5️⃣ Équilibrage des classes avec SMOTE
# -----------------------------
# Oversample the minority class on the TRAINING matrix only, so the test
# split stays untouched for honest evaluation.
smote = SMOTE(random_state=42)
X_train_balanced, y_train_balanced = smote.fit_resample(X_train_tfidf, y_train)

# -----------------------------
# 6️⃣ Entraînement du modèle Logistic Regression
# -----------------------------
model = LogisticRegression(random_state=42, max_iter=1000)
model.fit(X_train_balanced, y_train_balanced)

# -----------------------------
# 7️⃣ Évaluation rapide
# -----------------------------
# Evaluate on the untouched (non-oversampled) test split.
y_pred = model.predict(X_test_tfidf)
print("Classification Report:\n", classification_report(y_test, y_pred))
print("Matrice de confusion:\n", confusion_matrix(y_test, y_pred))
accuracy = accuracy_score(y_test, y_pred)
print(f"Accuracy: {accuracy:.4f}")

if hasattr(model, 'predict_proba'):
    # Fix: locate the 'spam' column via model.classes_ instead of assuming
    # it sits at index 1 — predict_proba columns follow classes_ order.
    spam_col = list(model.classes_).index('spam')
    y_test_binary = (y_test == 'spam').astype(int)
    auc = roc_auc_score(y_test_binary, model.predict_proba(X_test_tfidf)[:, spam_col])
    print(f"AUC-ROC: {auc:.4f}")

# -----------------------------
# 8️⃣ Sauvegarder modèle et TF-IDF
# -----------------------------
# Persist both fitted artefacts so the inference app can reload them.
for artefact, path in ((model, "spam_model.pkl"), (tfidf, "tfidf_vectorizer.pkl")):
    joblib.dump(artefact, path)
print("✅ Modèle Logistic Regression et TF-IDF vectorizer sauvegardés avec succès !")

Files changed (1) hide show
  1. app.py +67 -0
app.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import joblib
3
+ import re
4
+ import pandas as pd
5
+ from nltk.corpus import stopwords
6
+ from nltk.stem import PorterStemmer
7
+ import nltk
8
+
9
# -----------------------------
# 1️⃣ Prétraitement
# -----------------------------
# One-off NLTK resource download, then the shared stop-word set and stemmer
# used by preprocess_message below (must mirror the training script).
nltk.download('stopwords')
stop_words = set(stopwords.words('english'))
stemmer = PorterStemmer()
15
+
16
+
17
+
18
def preprocess_message(text):
    """Clean a raw incoming message for inference.

    Mirrors the training-time preprocessing: lowercase, strip URLs,
    e-mails, phone numbers and digits, keep spam-typical punctuation
    (!, /, +, >), then drop stop words and stem what remains.
    Missing values (NaN) map to the empty string.
    """
    if pd.isna(text):
        return ""
    cleaned = text.lower()
    # Strip URLs, e-mail addresses, phone-number-like runs, then digits.
    for pattern in (r'http\S+|www\S+', r'\S+@\S+', r'\+?\d[\d -]{8,}\d', r'\d+'):
        cleaned = re.sub(pattern, '', cleaned)
    # Keep only letters, whitespace and punctuation typical of spam.
    cleaned = re.sub(r'[^a-z\s!/+>]', '', cleaned)
    tokens = (stemmer.stem(w) for w in cleaned.split() if w not in stop_words)
    return " ".join(tokens)
34
+
35
# -----------------------------
# 2️⃣ Chargement du modèle
# -----------------------------
# Artefacts produced by the training script (train_spam_model.py).
model = joblib.load("spam_model.pkl")
vectorizer = joblib.load("tfidf_vectorizer.pkl")
40
+
41
# -----------------------------
# 3️⃣ Fonction de prédiction
# -----------------------------
def predict_message(message):
    """Classify *message* and return the verdict as a JSON-able dict.

    Returns the original message, the predicted label, and the spam
    probability rounded to 4 decimals (None when the model cannot
    produce probabilities).
    """
    cleaned = preprocess_message(message)
    X = vectorizer.transform([cleaned])
    prediction = model.predict(X)[0]
    probability = None
    if hasattr(model, 'predict_proba'):
        # Fix: locate the 'spam' column via model.classes_ instead of
        # hard-coding index 1 — predict_proba columns follow classes_ order.
        spam_col = list(model.classes_).index('spam')
        probability = float(model.predict_proba(X)[0][spam_col])
    return {
        "Message": message,
        "Prediction": prediction,
        "Spam Probability": round(probability, 4) if probability is not None else None
    }
54
+
55
# -----------------------------
# 4️⃣ Interface Gradio
# -----------------------------
# One text box in, JSON verdict out.
iface = gr.Interface(
    fn=predict_message,
    inputs=gr.Textbox(lines=3, placeholder="Entrez votre message ici..."),
    outputs="json",
    title="📩 Spam Detector",
    description="Un modèle ML qui détecte si un message est SPAM ou HAM.",
)

if __name__ == "__main__":
    iface.launch()