Optimize: Force CPU mode and add low memory usage for HF Spaces
Browse files
app.py
CHANGED
|
@@ -11,7 +11,7 @@ import os
|
|
| 11 |
# Configuration
|
| 12 |
MODEL_PATH = "models"
|
| 13 |
MAX_LENGTH = 256
|
| 14 |
-
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
|
| 15 |
|
| 16 |
print("="*60)
|
| 17 |
print("🚀 Initialisation du Détecteur de Phishing")
|
|
@@ -29,7 +29,12 @@ print(f"📥 Chargement du tokenizer depuis {MODEL_PATH}...")
|
|
| 29 |
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
|
| 30 |
|
| 31 |
print(f"📥 Chargement du modèle depuis {MODEL_PATH}...")
|
| 32 |
-
model = AutoModelForSequenceClassification.from_pretrained(MODEL_PATH)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 33 |
model.to(DEVICE)
|
| 34 |
model.eval()
|
| 35 |
|
|
@@ -234,8 +239,7 @@ if __name__ == "__main__":
|
|
| 234 |
print("="*60 + "\n")
|
| 235 |
|
| 236 |
demo.launch(
|
| 237 |
-
|
| 238 |
-
|
| 239 |
-
|
| 240 |
-
show_error=True
|
| 241 |
)
|
|
|
|
| 11 |
# Configuration
|
| 12 |
MODEL_PATH = "models"
|
| 13 |
MAX_LENGTH = 256
|
| 14 |
+
DEVICE = torch.device("cpu") # Force CPU pour Hugging Face Spaces gratuit
|
| 15 |
|
| 16 |
print("="*60)
|
| 17 |
print("🚀 Initialisation du Détecteur de Phishing")
|
|
|
|
| 29 |
tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)
|
| 30 |
|
| 31 |
print(f"📥 Chargement du modèle depuis {MODEL_PATH}...")
|
| 32 |
+
# Charger en mode CPU avec optimisations mémoire
|
| 33 |
+
model = AutoModelForSequenceClassification.from_pretrained(
|
| 34 |
+
MODEL_PATH,
|
| 35 |
+
torch_dtype=torch.float32, # Utiliser float32 pour compatibilité CPU
|
| 36 |
+
low_cpu_mem_usage=True # Optimisation mémoire
|
| 37 |
+
)
|
| 38 |
model.to(DEVICE)
|
| 39 |
model.eval()
|
| 40 |
|
|
|
|
| 239 |
print("="*60 + "\n")
|
| 240 |
|
| 241 |
demo.launch(
|
| 242 |
+
show_error=True,
|
| 243 |
+
server_name="0.0.0.0", # Nécessaire pour Hugging Face Spaces
|
| 244 |
+
server_port=7860
|
|
|
|
| 245 |
)
|