Spaces:
Runtime error
Runtime error
Delete main.py
Browse files
main.py
DELETED
|
@@ -1,123 +0,0 @@
|
|
| 1 |
-
from fastapi import FastAPI, UploadFile, File
|
| 2 |
-
from fastapi.responses import JSONResponse
|
| 3 |
-
from fastapi.staticfiles import StaticFiles
|
| 4 |
-
from fastapi.middleware.cors import CORSMiddleware
|
| 5 |
-
|
| 6 |
-
from PyPDF2 import PdfReader
|
| 7 |
-
import docx
|
| 8 |
-
from docx.enum.text import WD_COLOR_INDEX
|
| 9 |
-
from io import BytesIO
|
| 10 |
-
import os
|
| 11 |
-
import uvicorn
|
| 12 |
-
|
| 13 |
-
# FastAPI application: exposes the AI-detection endpoint plus generated docs.
app = FastAPI(
    title="StealthWriter",
    description="Detect AI content in documents and generate reports.",
    version="1.0.0",
    docs_url="/docs",  # Swagger UI
    redoc_url="/redoc",  # Redoc UI
    openapi_url="/openapi.json"
)


# ✅ CORS
# Origins are matched exactly against the request's Origin header, which never
# includes a trailing slash — "https://stealth-writer.vercel.app/" could never
# match, silently blocking the deployed frontend. Fixed: slash removed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["http://localhost:5173", "https://stealth-writer.vercel.app"],
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
|
| 31 |
-
|
| 32 |
-
# === Global lazy-load model vars ===
# Filled in by the startup hook below so the heavy detector import does not
# block module import (keeps cold starts and `--reload` fast).
analyze_text = None
generate_pdf_report = None

@app.on_event("startup")
async def load_model():
    """Import the detector lazily and publish its entry points as globals."""
    global analyze_text, generate_pdf_report
    from detector import custom_model
    analyze_text = custom_model.analyze_text
    generate_pdf_report = custom_model.generate_pdf_report
|
| 42 |
-
|
| 43 |
-
# === Paths ===
# Directory for generated reports, created next to this module; exist_ok
# makes startup idempotent across restarts.
REPORTS_DIR = os.path.join(os.path.dirname(__file__), "reports")
os.makedirs(REPORTS_DIR, exist_ok=True)

# Serve the generated reports as static files so the URLs returned by the
# detect endpoint ("/reports/<name>") are directly downloadable.
app.mount("/reports", StaticFiles(directory=REPORTS_DIR), name="reports")
|
| 48 |
-
|
| 49 |
-
# === File text extraction ===
def extract_text(file: UploadFile, ext: str) -> str:
    """Return the plain text of an uploaded .txt, .pdf, or .docx file.

    Args:
        file: The uploaded file; its underlying stream is read fully.
        ext: Lower-cased file extension including the leading dot.

    Raises:
        ValueError: If *ext* is not one of the supported extensions.
    """
    content = file.file.read()

    if ext == ".txt":
        # Tolerate odd encodings rather than failing the whole request.
        return content.decode("utf-8", errors="ignore")
    elif ext == ".pdf":
        # BytesIO is built only on the branches that need a file-like object
        # (the original constructed it unconditionally, wasted on .txt).
        reader = PdfReader(BytesIO(content))
        # extract_text() may return None for image-only pages.
        return "".join([page.extract_text() or "" for page in reader.pages])
    elif ext == ".docx":
        doc = docx.Document(BytesIO(content))
        return "\n".join([para.text for para in doc.paragraphs])
    else:
        raise ValueError("Unsupported file type")
|
| 64 |
-
|
| 65 |
-
# === Main endpoint ===
@app.post("/api/detect")
async def detect(file: UploadFile = File(...)):
    """Analyze an uploaded document for AI-generated content.

    Accepts .txt, .pdf, or .docx, runs the detector over the extracted text,
    writes a highlighted DOCX report and a PDF report under REPORTS_DIR, and
    returns the scores plus download URLs for both reports.
    """
    try:
        # SECURITY: the filename is fully client-controlled. basename() strips
        # any directory components so a name like "../../x.pdf" cannot make the
        # report paths escape REPORTS_DIR (path traversal). The `or ""` also
        # guards against a missing filename (None) crashing splitext.
        safe_name = os.path.basename(file.filename or "")
        ext = os.path.splitext(safe_name)[1].lower()
        if ext not in [".txt", ".pdf", ".docx"]:
            raise ValueError("Unsupported file format")

        # Extract + Analyze
        text = extract_text(file, ext)
        result = analyze_text(text)

        # === Save DOCX report ===
        filename_base = os.path.splitext(safe_name)[0]
        docx_filename = f"{filename_base}_report.docx"
        docx_path = os.path.join(REPORTS_DIR, docx_filename)

        doc = docx.Document()
        doc.add_heading("AI Detection Summary", level=1)
        doc.add_paragraph(f"Overall AI %: {result['overall_ai_percent']}%")
        doc.add_paragraph(f"Total Sentences: {result['total_sentences']}")
        doc.add_paragraph(f"AI Sentences: {result['ai_sentences']}")
        doc.add_paragraph("Sentences detected as AI are highlighted in cyan.\n")
        doc.add_heading("Sentence Analysis", level=2)

        paragraph = doc.add_paragraph()
        for para in result["results"]:
            for sentence, is_ai, _ in para:
                # Skip empty or non-string entries the detector may emit.
                if not isinstance(sentence, str) or not sentence.strip():
                    continue
                run = paragraph.add_run(sentence + " ")
                if is_ai:
                    run.font.highlight_color = WD_COLOR_INDEX.TURQUOISE

        doc.save(docx_path)

        # === Save PDF report (uses ReportLab) ===
        pdf_filename = generate_pdf_report(result, filename_base)

        return {
            "success": True,
            "score": {
                **{k: v for k, v in result.items() if k != "results"},
                "results": [
                    [{"sentence": s, "is_ai": is_ai, "ai_score": round(ai_score * 100, 2)} for s, is_ai, ai_score in para]
                    for para in result["results"]
                ]
            },
            "docx_url": f"/reports/{docx_filename}",
            "pdf_url": f"/reports/{pdf_filename}"
        }

    except Exception as e:
        # Top-level boundary: surface failures to the client instead of an
        # opaque traceback. NOTE(review): unsupported file types also land here
        # as 500; a 4xx may suit the frontend better — confirm before changing.
        return JSONResponse(content={"success": False, "error": str(e)}, status_code=500)
|
| 119 |
-
|
| 120 |
-
# === Port binding for Render ===
if __name__ == "__main__":
    # Render injects PORT into the environment; fall back to 7860 locally.
    uvicorn.run("main:app", host="0.0.0.0", port=int(os.environ.get("PORT", 7860)))
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|