Spaces:
Sleeping
Sleeping
File size: 2,036 Bytes
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
import joblib
import requests
import os
from dotenv import load_dotenv
# Load environment variables (e.g. N8N_WEBHOOK_URL) from a local .env file.
load_dotenv()
# Serve the frontend from the repository root alongside the API endpoints.
app = Flask(__name__, static_folder='.', static_url_path='')
CORS(app) # Enable CORS for all routes so a separately-hosted frontend can call the API
# Load the trained classifier and its text vectorizer from disk.
# On failure the app still starts, but /predict answers 500 ("Model not
# loaded") until the .pkl files are present.
try:
    model = joblib.load("spam_model.pkl")
    vectorizer = joblib.load("vectorizer.pkl")
except Exception as e:
    print(f"Error loading model: {e}")
    model = None
    vectorizer = None
else:
    print("Model and vectorizer loaded successfully.")

# Optional webhook URL; when set, prediction results are forwarded to N8N.
N8N_URL = os.getenv("N8N_WEBHOOK_URL")
@app.route('/')
def serve_frontend():
    """Serve the single-page frontend (index.html) from the app root."""
    root, page = '.', 'index.html'
    return send_from_directory(root, page)
@app.route('/<path:path>')
def serve_static(path):
    """Serve any other static asset (CSS/JS/images) from the app root."""
    return send_from_directory('.', path)
@app.route('/predict', methods=['POST'])
def predict():
    """Classify a message as spam/ham and forward the result to N8N.

    Expects a JSON body of the form {"message": "<text>"}.
    Returns JSON {"prediction", "confidence", "message"} on success,
    400 on a missing/invalid body, 500 when the model is unavailable.
    """
    # Explicit None checks: some estimators (e.g. sklearn Pipeline) define
    # __len__, so `not model` could be truthy for a valid object.
    if model is None or vectorizer is None:
        return jsonify({"error": "Model not loaded"}), 500

    # silent=True returns None on malformed JSON instead of raising,
    # so bad payloads get a clean 400 rather than a 500.
    data = request.get_json(force=True, silent=True)
    if not isinstance(data, dict):
        return jsonify({"error": "No message provided"}), 400
    message = data.get('message', '')
    if not message:
        return jsonify({"error": "No message provided"}), 400

    # Vectorize input and predict
    vectorized_message = vectorizer.transform([message])
    prediction = model.predict(vectorized_message)[0]
    probabilities = model.predict_proba(vectorized_message)
    confidence = max(probabilities[0])

    # numpy scalar labels (e.g. np.int64) are not JSON-serializable;
    # .item() converts to a native Python type, plain strings pass through.
    if hasattr(prediction, "item"):
        prediction = prediction.item()

    result = {
        "prediction": prediction,
        "confidence": float(confidence),
        "message": message
    }

    # Fire-and-forget notification to the N8N webhook; a webhook failure
    # must never break the API response.
    if N8N_URL:
        try:
            print(f"Sending to N8N: {N8N_URL}")
            requests.post(N8N_URL, json=result, timeout=5)
        except requests.RequestException as e:
            print(f"Failed to send to N8N: {e}")

    return jsonify(result)
if __name__ == '__main__':
    # Bind on all interfaces; port 7860 is the Hugging Face Spaces convention.
    app.run(debug=True, host='0.0.0.0', port=7860)
|