Upload 2 files
model.py
ADDED
@@ -0,0 +1,34 @@
import joblib
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

# Define file paths
MODEL_PATH = r"C:\Users\LENOVO\Desktop\ai-content-detector-extension\models\random_forest_model.pkl"
VECTORIZER_PATH = r"C:\Users\LENOVO\Desktop\ai-content-detector-extension\models\vectorizer.pkl"
SVD_PATH = r"C:\Users\LENOVO\Desktop\ai-content-detector-extension\models\svd.pkl"

try:
    # Load the trained model, vectorizer, and SVD transformer
    model = joblib.load(MODEL_PATH)
    vectorizer = joblib.load(VECTORIZER_PATH)
    svd = joblib.load(SVD_PATH)
    print("✅ Model, vectorizer, and SVD loaded successfully!")
except Exception as e:
    print(f"❌ Error loading model files: {e}")
    exit(1)


def predict_text(text: str) -> dict:
    """Preprocess input text and predict using the trained model."""
    try:
        X_tfidf = vectorizer.transform([text])    # Convert text to TF-IDF features
        X_reduced = svd.transform(X_tfidf)        # Apply dimensionality reduction
        prediction = model.predict(X_reduced)[0]  # Predict label (0 or 1)

        probability = float(model.predict_proba(X_reduced)[0][1])  # Probability of the positive (AI-generated) class
        print(probability)  # Print probability for debugging

        return {"generated": int(prediction), "probability": probability}
    except Exception as e:
        print(f"❌ Prediction error: {e}")
        return {"generated": -1, "probability": 0.0}
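
The three .pkl artifacts that model.py loads are not part of this upload. As a rough sketch of how they could have been produced, assuming scikit-learn's TfidfVectorizer, TruncatedSVD, and RandomForestClassifier (the placeholder corpus and every hyperparameter below are illustrative assumptions, not values taken from this repository):

import os
import joblib
from sklearn.ensemble import RandomForestClassifier
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import TruncatedSVD

# Placeholder training data: labels are 0 (human-written) / 1 (AI-generated).
texts = [
    "an example human-written paragraph",
    "an example ai-generated paragraph",
]
labels = [0, 1]

# Fit the TF-IDF vectorizer, then reduce dimensionality with truncated SVD.
vectorizer = TfidfVectorizer()              # settings are assumptions
X_tfidf = vectorizer.fit_transform(texts)
svd = TruncatedSVD(n_components=2)          # n_components is an assumption
X_reduced = svd.fit_transform(X_tfidf)

# Train the random-forest classifier on the reduced features.
model = RandomForestClassifier(n_estimators=100, random_state=42)
model.fit(X_reduced, labels)

# Persist all three artifacts so model.py can load them with joblib.load.
os.makedirs("models", exist_ok=True)
joblib.dump(model, "models/random_forest_model.pkl")
joblib.dump(vectorizer, "models/vectorizer.pkl")
joblib.dump(svd, "models/svd.pkl")

Note that model.py expects these artifacts at absolute Windows paths, so those paths (or the models/ location) would need to match wherever the code actually runs.
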
server.py
ADDED
@@ -0,0 +1,33 @@
from fastapi import FastAPI
from pydantic import BaseModel
from fastapi.middleware.cors import CORSMiddleware
from api.model import predict_text

app = FastAPI(title="AI Content Detector API")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["chrome-extension://*"],  # Intended to allow Chrome extensions (CORSMiddleware matches these strings literally, so allow_origin_regex would be needed to match arbitrary extension IDs)
    allow_credentials=True,
    allow_methods=["*"],   # Allow all HTTP methods (GET, POST, etc.)
    allow_headers=["*"],   # Allow all headers
)


class TextInput(BaseModel):
    text: str


@app.post("/predict/")
async def predict(input_data: TextInput):
    """Receive text and return the AI detection result."""
    result = predict_text(input_data.text)

    print(f"RESULT\n{result}")

    return {
        "generated": result["generated"],
        "probability": result["probability"]
    }


# if __name__ == "__main__":
#     import uvicorn
#     uvicorn.run(app, host="0.0.0.0", port=8000)
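
With both files in place (and a package layout that makes api.model importable), the server can be started along the lines of the commented-out block above, e.g. uvicorn server:app --host 0.0.0.0 --port 8000, adjusting the module path to the actual layout. A minimal client sketch for exercising the /predict/ endpoint; the host, port, and sample text below are assumptions:

import requests  # assumes the requests package is available

# Hypothetical local test against the /predict/ endpoint defined in server.py.
resp = requests.post(
    "http://localhost:8000/predict/",  # assumed host and port
    json={"text": "Paste the paragraph you want to check here."},
)
resp.raise_for_status()
data = resp.json()
print(data["generated"], data["probability"])  # e.g. 1 0.87 (illustrative values)
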