# screencomply_documents / backend / utils / submission_logger.py
# Author: misakovhearst — initial deploy (commit 48c7fed)
import json
import os
from datetime import datetime, timezone
# The JSONL log sits at the project root (next to run.bat / run.sh):
# backend/utils/ → up two levels → submissions.jsonl
_LOG_PATH = os.path.normpath(
    os.path.join(os.path.dirname(__file__), "..", "..", "submissions.jsonl")
)
def log_submission(
    filename: str,
    overall_ai_score: float,
    overall_confidence: str,
    status_label: str,
    detector_results: dict,
    text_stats: dict,
    text_preview: str = "",
) -> None:
    """
    Append one JSON line to submissions.jsonl for every analysis.

    Args:
        filename: Name of the analyzed file.
        overall_ai_score: Aggregate AI-likelihood score (stored rounded to 4 places).
        overall_confidence: Confidence label for the overall score.
        status_label: Human-readable status for this analysis.
        detector_results: Mapping of detector name -> result dict; each result
            may carry "score", "confidence", and a "metadata" dict with
            "prediction", "ai_probability" and "human_probability".
        text_stats: Statistics dict about the analyzed text (stored verbatim).
        text_preview: Optional excerpt; only the first 200 characters are kept.
            ``None`` is tolerated and treated as empty.

    Note:
        Each call opens the file in append mode and writes one short line.
        Small appends are usually flushed as a single write() syscall, but
        this is NOT a guaranteed cross-thread/process atomicity contract —
        add explicit locking if strict line interleaving must be prevented.
    """
    detectors = {}
    for name, res in detector_results.items():
        meta = res.get("metadata", {})   # hoisted: read once per detector
        score = res.get("score", 0)      # hoisted: used up to three times
        detectors[name] = {
            "score": round(score, 4),
            "confidence": res.get("confidence"),
            "prediction": meta.get("prediction"),
            # Fall back to the raw score when the detector supplies no
            # explicit probabilities.
            "ai_probability": round(meta.get("ai_probability", score), 4),
            "human_probability": round(meta.get("human_probability", 1 - score), 4),
        }

    record = {
        "timestamp": datetime.now(timezone.utc).isoformat(),
        "filename": filename,
        "overall_ai_score": round(overall_ai_score, 4),
        "overall_confidence": overall_confidence,
        "status_label": status_label,
        "text_preview": (text_preview or "")[:200],  # tolerate None preview
        "text_stats": text_stats,
        "detectors": detectors,
    }
    line = json.dumps(record, ensure_ascii=False) + "\n"
    with open(_LOG_PATH, "a", encoding="utf-8") as f:
        f.write(line)