Commit 66c2f04
initial: stateless architecture with automated model downloads
Browse files

- .env.template +26 -0
- .gitignore +40 -0
- Dockerfile +46 -0
- README.md +136 -0
- app/api/chat.py +29 -0
- app/api/exams.py +16 -0
- app/api/proctoring.py +236 -0
- app/api/router.py +10 -0
- app/core/config.py +55 -0
- app/core/logging_config.py +56 -0
- app/schemas/__init__.py +46 -0
- app/schemas/chat.py +27 -0
- app/schemas/common.py +8 -0
- app/schemas/exam.py +31 -0
- app/schemas/proctoring.py +128 -0
- app/services/chat_service.py +69 -0
- app/services/exam_service.py +87 -0
- app/services/face_detection_service.py +72 -0
- app/services/face_recognition_service.py +153 -0
- app/services/object_detection_service.py +181 -0
- app/utils/__init__.py +1 -0
- app/utils/image_utils.py +125 -0
- main.py +88 -0
- requirements.txt +16 -0
.env.template (ADDED)
@@ -0,0 +1,26 @@

```env
# App Configuration
APP_NAME="Examify AI Services"
APP_ENV="development"
DEBUG=true
API_V1_STR="/api/v1"

# Security
SECRET_KEY="your-super-secret-key-here"
ACCESS_TOKEN_EXPIRE_MINUTES=11520

# AI Providers
GROQ_API_KEY="gsk_..."
GEMINI_API_KEY="..."
OPENAI_API_KEY="sk-..."

# Models
DEFAULT_CHAT_MODEL="llama3-70b-8192"
DEFAULT_EXAM_MODEL="gemini-1.5-pro-latest"

# Logging
LOG_LEVEL="INFO"

# Proctoring Configuration
FACE_VERIFY_THRESHOLD=0.80
OBJECT_DETECTION_CONFIDENCE=0.50
MAX_IMAGE_SIZE_MB=5
```
.gitignore (ADDED)
@@ -0,0 +1,40 @@

```gitignore
# Environments
.env
.venv
env/
venv/
ENV/
env.bak/
venv.bak/

# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
*$py.class

# Pytest / Coverage
.pytest_cache/
htmlcov/
.coverage
.coverage.*

# Huggingface/DeepFace Weights & ONNX Models
.deepface/
*.h5
*.onnx
facenet_weights.h5
openface_weights.h5
vgg_face_weights.h5

# Redundant backup folders (the ones causing issues)
Ai Things/
Examify_AI_Services/

# OS generated files
.DS_Store
.DS_Store?
._*
.Spotlight-V100
.Trashes
ehthumbs.db
Thumbs.db
```
Dockerfile (ADDED)
@@ -0,0 +1,46 @@

```dockerfile
FROM python:3.10-slim

WORKDIR /app

# Install base system libraries for image handling (OpenCV) and file downloads
RUN apt-get update && apt-get install -y --no-install-recommends \
    libglib2.0-0 \
    libgl1-mesa-glx \
    wget \
    && rm -rf /var/lib/apt/lists/*

# Install Python packages
COPY requirements.txt .
RUN pip install --no-cache-dir -r requirements.txt

# Create the standard Hugging Face user (UID 1000)
RUN useradd -m -u 1000 user
USER user
ENV HOME=/home/user
ENV PATH=/home/user/.local/bin:$PATH

# Prepare the model directories
RUN mkdir -p /home/user/.deepface/weights \
    && mkdir -p /app/app/ml_models

# Download AI model weights at build time (keeps the Git repo light)
# 1. Facenet Weights
RUN wget -q -O /home/user/.deepface/weights/facenet_weights.h5 \
    https://huggingface.co/junjiang/GestureFace/resolve/main/facenet_weights.h5

# 2. Face Detection (YuNet)
RUN wget -q -O /app/app/ml_models/face_detection_yunet.onnx \
    https://github.com/opencv/opencv_zoo/raw/master/models/face_detection_yunet/face_detection_yunet_2023mar.onnx

# 3. Object Detection (YOLOv8n)
RUN wget -q -O /app/app/ml_models/yolov8n.onnx \
    https://github.com/ultralytics/assets/releases/download/v8.2.0/yolov8n.onnx

# Copy the code and grant ownership to the new user
COPY --chown=user:user . /app

# Let the API listen on port 7860 (Hugging Face default)
EXPOSE 7860

# Run the server
CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
```
README.md (ADDED)
@@ -0,0 +1,136 @@

````markdown
# Examify AI Services 🎓🤖

**Examify AI Services** is a robust, modular, and production-ready **FastAPI** backend designed to power AI-driven educational and assessment platforms.

It provides advanced generative AI capabilities (for exam generation and intelligent tutoring) alongside state-of-the-art **AI Proctoring** built on Computer Vision and Deep Learning models, all accessible via fast, stateless HTTP endpoints.

---

## 🌟 Key Features

### 1. 👁️‍🗨️ Intelligent AI Proctoring (`/api/v1/proctoring`)
A complete suite of tools to maintain exam integrity via webcam feed analysis:
* **Real-time Face Detection**: Powered by **YuNet (ONNX)**; instantly detects whether a face is missing from the frame or multiple faces are present.
* **Object & Violation Detection**: Powered by **YOLOv8n (ONNX)**; spots unauthorized items such as mobile phones, laptops, and books, and detects when an extra person enters the frame.
* **Face Recognition (Identity Verification)**: Built on **DeepFace (FaceNet)**; extracts the student's biometric embedding before the exam (returned to the caller for storage; this service persists nothing) and verifies their identity continuously during the exam via cosine similarity.
* **Combined Frame Analysis**: An all-in-one endpoint (`/analyze-frame`) that runs face detection, object detection, and identity verification in parallel, automatically calculating a `safe`, `warning`, or `critical` risk level for the analyzed frame.

### 2. 📝 Exam Generation (`/api/v1/exams`)
* Generates customized quiz questions (Multiple Choice, True/False, Short Answer) from provided study material using Generative AI (e.g., Gemini, Groq, OpenAI).

### 3. 💬 AI Chat & Tutoring (`/api/v1/chat`)
* Provides both standard and streaming (SSE) conversational endpoints to power AI assistants and interactive student tutors.

---

## 🏗️ Project Architecture

```plaintext
Examify_AI_Services/
├── app/
│   ├── api/          # FastAPI routers and endpoint definitions (chat, exams, proctoring)
│   ├── core/         # App configuration and logging (Pydantic Settings)
│   ├── ml_artifacts/ # Reserved for face-embedding artifacts (unused in the stateless architecture)
│   ├── ml_models/    # Local ONNX models (yolov8n.onnx, face_detection_yunet.onnx)
│   ├── schemas/      # Pydantic validation models (Requests/Responses)
│   ├── services/     # Core business logic and AI integration implementations
│   └── utils/        # Helper tools like OpenCV base64 image decoding/encoding
├── main.py           # FastAPI application entry point & lifespan loading
├── .env.template     # Environment variables template
├── Dockerfile        # Configured for headless OpenCV execution
└── requirements.txt  # Python dependencies
```

---

## 🚀 Getting Started

### Prerequisites
* **Python 3.10+** installed on your system.
* A virtual environment (highly recommended).

### 1. Installation

Clone the repository and install the required dependencies.

```powershell
# Create and activate a virtual environment
python -m venv venv
.\venv\Scripts\activate    # On Windows
# source venv/bin/activate # On Mac/Linux

# Install dependencies
python -m pip install -r requirements.txt
```

### 2. Configuration

Create a `.env` file in the root directory and copy the contents from `.env.template`. Adjust the keys based on your environment.

```env
# Example .env
APP_NAME="Examify AI Services"
DEBUG=true

# AI Provider Keys (for Chat and Exams)
GROQ_API_KEY="your_groq_key"
GEMINI_API_KEY="your_gemini_key"
OPENAI_API_KEY="your_openai_key"

# Proctoring Configuration
FACE_VERIFY_THRESHOLD=0.80
OBJECT_DETECTION_CONFIDENCE=0.50
```

### 3. Running the Server

Start the FastAPI application using Uvicorn:

```powershell
python -m uvicorn main:app --host 0.0.0.0 --port 8000 --reload
```

You can now view the auto-generated interactive API documentation at:
* **Swagger UI:** [http://localhost:8000/docs](http://localhost:8000/docs)
* **ReDoc:** [http://localhost:8000/redoc](http://localhost:8000/redoc)

---

## 🎯 Important Proctoring Endpoints

All proctoring endpoints accept the webcam image as a multipart form upload (`file` field); cropped or annotated images in responses are returned as **Base64 strings**.

* `POST /api/v1/proctoring/register-face`
    * **Purpose:** Register a new student's face before the exam and return the embedding for the caller to store.
    * **Form fields:** `student_id`, `file` (a clear portrait image)
* `POST /api/v1/proctoring/detect-face`
    * **Purpose:** Detect face presence and bounds.
    * **Form fields:** `file`
* `POST /api/v1/proctoring/detect-objects`
    * **Purpose:** Scan for forbidden items.
    * **Form fields:** `file`, `detect_books`, `confidence_threshold`, `return_annotated`
* `POST /api/v1/proctoring/verify-face`
    * **Purpose:** Match the current face against the registered student.
    * **Form fields:** `student_id`, `reference_embedding` (JSON array of floats), `file`
* `POST /api/v1/proctoring/analyze-frame`
    * **Purpose:** All-in-one check for the exam frontend. Returns `risk_level: "safe" | "warning" | "critical"`.
    * **Form fields:** `student_id`, `file`, `detect_books`, `return_annotated`, and an optional `reference_embedding`

---

## 🐳 Docker Deployment

A `Dockerfile` is provided for containerized deployment. It is pre-configured with the necessary system libraries (`libglib2.0-0` and `libgl1-mesa-glx`) required by headless OpenCV. The container listens on port 7860 (the Hugging Face Spaces default).

```bash
docker build -t examify-ai-services .
docker run -p 7860:7860 --env-file .env examify-ai-services
```
````
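To make the endpoint contract concrete before the per-file diffs below, here is a minimal end-to-end client sketch for the proctoring flow. It is illustrative only: it assumes a local server on port 8000 (as in Getting Started), uses the `requests` library (not part of this commit's requirements.txt), and the field names match the `Form`/`File` parameters defined in `app/api/proctoring.py`.

```python
# Minimal client sketch for the stateless proctoring flow (illustrative).
# Assumes: server on localhost:8000 and `pip install requests`
# (requests is NOT listed in this commit's requirements.txt).
import json
import requests

BASE = "http://localhost:8000/api/v1/proctoring"

# 1. Register once before the exam; the response carries the embedding
#    that the caller (e.g., the .NET API) is responsible for storing.
with open("portrait.jpg", "rb") as f:
    reg = requests.post(
        f"{BASE}/register-face",
        data={"student_id": "ST-101"},
        files={"file": ("portrait.jpg", f, "image/jpeg")},
    ).json()
embedding = reg["embedding"]  # 512 floats

# 2. During the exam, analyze each sampled webcam frame.
with open("frame.jpg", "rb") as f:
    result = requests.post(
        f"{BASE}/analyze-frame",
        data={
            "student_id": "ST-101",
            "detect_books": "true",
            "return_annotated": "false",
            "reference_embedding": json.dumps(embedding),
        },
        files={"file": ("frame.jpg", f, "image/jpeg")},
    ).json()

print(result["risk_level"], result["total_violations"])
```

The same pattern applies to `/detect-face` and `/detect-objects`, which take only the `file` upload plus their tuning form fields.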
app/api/chat.py (ADDED)
@@ -0,0 +1,29 @@

```python
from fastapi import APIRouter, HTTPException
from fastapi.responses import StreamingResponse
from app.schemas import ChatRequest, ChatResponse
from app.services.chat_service import chat_service
from loguru import logger

router = APIRouter(prefix="/chat", tags=["AI Chat"])

@router.post("/", response_model=ChatResponse)
async def chat(request: ChatRequest):
    """General AI chat endpoint"""
    try:
        response = await chat_service.get_chat_response(request)
        return response
    except Exception as e:
        logger.error(f"Chat error: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal AI service error")

@router.post("/stream")
async def chat_stream(request: ChatRequest):
    """Streaming AI chat endpoint using SSE"""
    try:
        return StreamingResponse(
            chat_service.stream_chat_response(request),
            media_type="text/event-stream"
        )
    except Exception as e:
        logger.error(f"Chat stream error: {str(e)}")
        raise HTTPException(status_code=500, detail="Internal AI streaming error")
```
app/api/exams.py (ADDED)
@@ -0,0 +1,16 @@

```python
from fastapi import APIRouter, HTTPException
from app.schemas import ExamGenerateRequest, ExamResponse
from app.services.exam_service import exam_service
from loguru import logger

router = APIRouter(prefix="/exams", tags=["Exam Generation"])

@router.post("/generate", response_model=ExamResponse)
async def generate_exam(request: ExamGenerateRequest):
    """Generate exam from content"""
    try:
        response = await exam_service.generate_exam(request)
        return response
    except Exception as e:
        logger.error(f"Generate exam error: {str(e)}")
        raise HTTPException(status_code=500, detail="Failed to generate exam")
```
app/api/proctoring.py (ADDED)
@@ -0,0 +1,236 @@

```python
import asyncio
import json
import time
from typing import List, Optional
from fastapi import APIRouter, HTTPException, UploadFile, File, Form
from loguru import logger

from app.schemas.proctoring import (
    FaceDetectionResponse,
    ObjectDetectionResponse,
    FaceEmbeddingResponse,
    FaceVerifyResponse,
    ProctorFrameResponse
)
from app.services.face_detection_service import face_detection_service
from app.services.object_detection_service import object_detection_service
from app.services.face_recognition_service import face_recognition_service
from app.utils.image_utils import decode_image_from_bytes, decode_base64_image

router = APIRouter(prefix="/proctoring", tags=["AI Proctoring"])


# ─────────────────────────────────────────────
# Face Detection (unchanged)
# ─────────────────────────────────────────────

@router.post("/detect-face", response_model=FaceDetectionResponse)
async def detect_face(file: UploadFile = File(...)):
    """Detect faces and return bounding box and cropped face"""
    try:
        contents = await file.read()
        image = decode_image_from_bytes(contents)
        return await face_detection_service.detect(image)
    except ValueError as e:
        logger.error(f"Invalid image format: {e}")
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Face detection failed: {e}")
        raise HTTPException(status_code=500, detail="Internal processing error")


# ─────────────────────────────────────────────
# Object Detection (unchanged)
# ─────────────────────────────────────────────

@router.post("/detect-objects", response_model=ObjectDetectionResponse)
async def detect_objects(
    detect_books: bool = Form(True),
    confidence_threshold: float = Form(0.50),
    return_annotated: bool = Form(False),
    file: UploadFile = File(...)
):
    """Detect objects/violations in the provided image file"""
    try:
        contents = await file.read()
        image = decode_image_from_bytes(contents)
        return await object_detection_service.detect(
            image=image,
            detect_books=detect_books,
            conf_threshold=confidence_threshold,
            return_annotated=return_annotated
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Object detection failed: {e}")
        raise HTTPException(status_code=500, detail="Internal processing error")


# ─────────────────────────────────────────────
# Face Registration (stateless: returns the embedding to the .NET API)
# ─────────────────────────────────────────────

@router.post("/register-face", response_model=FaceEmbeddingResponse)
async def register_face(
    student_id: str = Form(...),
    file: UploadFile = File(...)
):
    """
    ## Register a Student's Face

    Extracts a 512-dimensional embedding array from the uploaded image.

    ### How it works with .NET:
    1. Send a clear portrait image of the student's face.
    2. The API calculates the `embedding` vector (a mathematical representation).
    3. The `.NET API` must store this `embedding` array in **PostgreSQL** alongside the `student_id`.
    4. **No data is stored locally by this service.**
    """
    try:
        contents = await file.read()
        image = decode_image_from_bytes(contents)

        return await face_recognition_service.register_student(
            student_id=student_id,
            face_images=[image]
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Face registration failed: {e}")
        raise HTTPException(status_code=500, detail="Internal processing error")


# ─────────────────────────────────────────────
# Face Verification (stateless: receives the embedding from the .NET API)
# ─────────────────────────────────────────────

@router.post("/verify-face", response_model=FaceVerifyResponse)
async def verify_face(
    student_id: str = Form(...),
    reference_embedding: str = Form(
        ...,
        description="JSON array of floats (the stored embedding from .NET DB). "
                    "Example: [0.12, -0.34, 0.56, ...]"
    ),
    file: UploadFile = File(...)
):
    """
    ## Verify a Student's Identity (During Exam)

    Compares a live camera frame against the stored embedding to detect cheating/impersonation.

    ### How it works with .NET:
    1. Fetch the student's `embedding` array from the **PostgreSQL** database.
    2. Pass that array as a JSON string to `reference_embedding`. *(Example: `[0.12, -0.34, 0.56, ...]`. Do NOT pass the literal string `"string"`.)*
    3. Provide the new `image` from the student's webcam.
    4. The API calculates the `Cosine Distance`. If it's `< 0.40`, the student is `verified: true`.
    """
    try:
        # Parse the reference embedding from JSON string to List[float]
        try:
            embedding_list = json.loads(reference_embedding)
            if not isinstance(embedding_list, list):
                raise ValueError("reference_embedding must be a JSON array of numbers")
        except json.JSONDecodeError:
            raise ValueError(
                "Invalid reference_embedding format. Must be a valid JSON array. "
                "Example: [0.12, -0.34, 0.56, ...]"
            )

        contents = await file.read()
        image = decode_image_from_bytes(contents)

        return await face_recognition_service.verify_face(
            student_id=student_id,
            face_image=image,
            reference_embedding=embedding_list
        )
    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Face verification failed: {e}")
        raise HTTPException(status_code=500, detail="Internal processing error")


# ─────────────────────────────────────────────
# Full Frame Analysis (stateless)
# ─────────────────────────────────────────────

@router.post("/analyze-frame", response_model=ProctorFrameResponse)
async def analyze_frame(
    student_id: str = Form(...),
    detect_books: bool = Form(True),
    return_annotated: bool = Form(False),
    reference_embedding: Optional[str] = Form(
        None,
        description="Optional JSON array of floats for face verification. "
                    "If not provided, verification will be skipped."
    ),
    file: UploadFile = File(...)
):
    """
    Full AI proctoring flow in one request.

    If `reference_embedding` is provided, face verification will be performed.
    Otherwise, only face detection and object detection will run.
    """
    start_total = time.perf_counter()
    try:
        contents = await file.read()
        image = decode_image_from_bytes(contents)

        # Run face and object detection in parallel
        face_task = face_detection_service.detect(image)
        object_task = object_detection_service.detect(
            image=image,
            detect_books=detect_books,
            return_annotated=return_annotated
        )
        face_result, obj_result = await asyncio.gather(face_task, object_task)

        # Optional verification (only if reference_embedding is provided)
        verify_result = None
        if face_result.status == "single_face" and reference_embedding:
            try:
                embedding_list = json.loads(reference_embedding)
                cropped_face_img = decode_base64_image(face_result.face_image)
                verify_result = await face_recognition_service.verify_face(
                    student_id=student_id,
                    face_image=cropped_face_img,
                    reference_embedding=embedding_list
                )
            except (json.JSONDecodeError, ValueError) as ve:
                logger.warning(f"Skipping verification: {ve}")

        # Compute risk
        violations_count = int(face_result.violation) + obj_result.violation_count

        risk = "safe"
        if violations_count > 0:
            if face_result.violation or (verify_result and not verify_result.verified):
                risk = "critical"
            else:
                critical_types = ["extra_person", "mobile_detected", "laptop_detected"]
                has_critical = any(v.type in critical_types for v in obj_result.violations)
                risk = "critical" if has_critical else "warning"
        elif verify_result and not verify_result.verified:
            risk = "critical"

        processing_time_ms = (time.perf_counter() - start_total) * 1000

        return ProctorFrameResponse(
            face_detection=face_result,
            object_detection=obj_result,
            face_verification=verify_result,
            total_violations=violations_count,
            risk_level=risk,
            processing_time_ms=processing_time_ms
        )

    except ValueError as e:
        raise HTTPException(status_code=400, detail=str(e))
    except Exception as e:
        logger.error(f"Analyze frame failed: {e}")
        raise HTTPException(status_code=500, detail="Internal processing error")
```
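The risk policy inside `analyze_frame` can be read as a small pure function. The sketch below restates the same rules outside the endpoint for clarity; the function name and argument shapes are illustrative, not part of the codebase.

```python
# Illustrative restatement of the /analyze-frame risk policy (not repo code).
from typing import List, Optional

CRITICAL_TYPES = {"extra_person", "mobile_detected", "laptop_detected"}

def risk_level(face_violation: bool,
               object_violation_types: List[str],
               verified: Optional[bool]) -> str:
    """verified is None when no reference embedding was supplied."""
    violations = int(face_violation) + len(object_violation_types)
    if violations > 0:
        if face_violation or verified is False:
            return "critical"
        return "critical" if CRITICAL_TYPES & set(object_violation_types) else "warning"
    if verified is False:  # identity mismatch alone is still critical
        return "critical"
    return "safe"

assert risk_level(False, [], True) == "safe"
assert risk_level(False, ["book_detected"], True) == "warning"
assert risk_level(False, ["mobile_detected"], True) == "critical"
assert risk_level(True, [], None) == "critical"
```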
app/api/router.py (ADDED)
@@ -0,0 +1,10 @@

```python
from fastapi import APIRouter
from app.api.chat import router as chat_router
from app.api.exams import router as exam_router
from app.api.proctoring import router as proctoring_router

api_router = APIRouter()

api_router.include_router(chat_router)
api_router.include_router(exam_router)
api_router.include_router(proctoring_router)
```
app/core/config.py (ADDED)
@@ -0,0 +1,55 @@

```python
import os
from typing import List, Optional
from pydantic_settings import BaseSettings, SettingsConfigDict
from pydantic import Field

class Settings(BaseSettings):
    model_config = SettingsConfigDict(
        env_file=".env",
        env_file_encoding="utf-8",
        extra="ignore"
    )

    # App
    APP_NAME: str = "Examify AI Services"
    APP_ENV: str = "development"
    DEBUG: bool = True
    API_V1_STR: str = "/api/v1"

    # Security
    SECRET_KEY: str = "placeholder-key-for-dev"
    ACCESS_TOKEN_EXPIRE_MINUTES: int = 60 * 24 * 8  # 8 days

    # AI Providers
    GROQ_API_KEY: Optional[str] = None
    GEMINI_API_KEY: Optional[str] = None
    OPENAI_API_KEY: Optional[str] = None

    # Models
    DEFAULT_CHAT_MODEL: str = "llama3-70b-8192"
    DEFAULT_EXAM_MODEL: str = "gemini-1.5-pro"

    # Middleware
    CORS_ORIGINS: List[str] = ["*"]
    GZIP_MIN_SIZE: int = 500

    # Project Paths
    ROOT_DIR: str = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
    ML_ARTIFACTS_DIR: str = os.path.join(ROOT_DIR, "app", "ml_artifacts")

    # Proctoring Models
    FACE_DETECTION_MODEL_PATH: str = os.path.join(ROOT_DIR, "app", "ml_models", "face_detection_yunet.onnx")
    OBJECT_DETECTION_MODEL_PATH: str = os.path.join(ROOT_DIR, "app", "ml_models", "yolov8n.onnx")

    # Proctoring Thresholds
    FACE_VERIFY_THRESHOLD: float = 0.80
    FACE_VERIFY_HARD_REJECT: float = 1.00
    OBJECT_DETECTION_CONFIDENCE: float = 0.50
    OBJECT_DETECTION_NMS: float = 0.45

    # Image Limits
    MAX_IMAGE_SIZE_MB: int = 5
    MAX_IMAGE_WIDTH: int = 1920
    MAX_IMAGE_HEIGHT: int = 1080

settings = Settings()
```
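As a quick illustration of how these settings resolve, pydantic-settings gives environment variables (and `.env` entries) precedence over the class defaults above; the values below are illustrative.

```python
# Illustrative: environment / .env values override the class defaults.
import os

os.environ["FACE_VERIFY_THRESHOLD"] = "0.75"  # simulate a .env override

from app.core.config import Settings

s = Settings()
print(s.FACE_VERIFY_THRESHOLD)  # 0.75 (from the environment)
print(s.API_V1_STR)             # "/api/v1" (class default)
```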
app/core/logging_config.py (ADDED)
@@ -0,0 +1,56 @@

```python
import sys
import os
import time
import logging
from loguru import logger
from fastapi import Request
from starlette.middleware.base import BaseHTTPMiddleware

class RequestLoggingMiddleware(BaseHTTPMiddleware):
    async def dispatch(self, request: Request, call_next):
        start_time = time.time()
        request_id = request.headers.get("X-Request-ID", "internal")

        # Add correlation ID to context
        logger.configure(extra={"request_id": request_id})

        response = await call_next(request)

        process_time = (time.time() - start_time) * 1000
        formatted_process_time = "{0:.2f}".format(process_time)

        logger.info(
            f"RID: {request_id} | {request.method} {request.url.path} | "
            f"Status: {response.status_code} | Duration: {formatted_process_time}ms"
        )

        return response

def setup_logging():
    # Remove default handlers
    logging.getLogger().handlers = []

    # Configure Loguru
    logger.remove()
    logger.add(
        sys.stdout,
        format="<green>{time:YYYY-MM-DD HH:mm:ss}</green> | <level>{level: <8}</level> | <cyan>{name}</cyan>:<cyan>{function}</cyan>:<cyan>{line}</cyan> - <level>{message}</level>",
        level="INFO"
    )

    # Intercept standard logging
    class InterceptHandler(logging.Handler):
        def emit(self, record):
            try:
                level = logger.level(record.levelname).name
            except ValueError:
                level = record.levelno

            frame, depth = logging.currentframe(), 2
            while frame.f_code.co_filename == logging.__file__:
                frame = frame.f_back
                depth += 1

            logger.opt(depth=depth, exception=record.exc_info).log(level, record.getMessage())

    logging.basicConfig(handlers=[InterceptHandler()], level=0)
```
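`main.py` is part of this commit (+88 lines) but its diff is not shown here. A minimal sketch of how these pieces would typically be wired together, offered as an assumption about its shape rather than its actual contents:

```python
# Minimal wiring sketch (an assumption about main.py's shape,
# NOT its actual contents, which are not shown in this diff).
from fastapi import FastAPI

from app.core.config import settings
from app.core.logging_config import setup_logging, RequestLoggingMiddleware
from app.api.router import api_router

setup_logging()  # route stdlib logging through Loguru

app = FastAPI(title=settings.APP_NAME, debug=settings.DEBUG)
app.add_middleware(RequestLoggingMiddleware)
app.include_router(api_router, prefix=settings.API_V1_STR)
```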
app/schemas/__init__.py (ADDED)
@@ -0,0 +1,46 @@

```python
"""
schemas/__init__.py
-------------------
Backward-compatible re-export of all schemas.
Existing imports like `from app.schemas import ChatRequest` continue to work.
"""
# Common
from .common import ErrorResponse

# Chat
from .chat import Role, Message, ChatRequest, ChatResponse

# Exam
from .exam import QuestionType, ExamGenerateRequest, QuestionSchema, ExamResponse

# Proctoring
from .proctoring import (
    BoundingBox,
    FaceDetectionRequest,
    FaceDetectionResponse,
    ObjectDetectionRequest,
    ViolationItem,
    ObjectDetectionResponse,
    FaceEmbeddingRequest,
    FaceEmbeddingResponse,
    FaceVerifyRequest,
    FaceVerifyResponse,
    ProctorFrameRequest,
    ProctorFrameResponse,
)

__all__ = [
    # common
    "ErrorResponse",
    # chat
    "Role", "Message", "ChatRequest", "ChatResponse",
    # exam
    "QuestionType", "ExamGenerateRequest", "QuestionSchema", "ExamResponse",
    # proctoring
    "BoundingBox",
    "FaceDetectionRequest", "FaceDetectionResponse",
    "ObjectDetectionRequest", "ViolationItem", "ObjectDetectionResponse",
    "FaceEmbeddingRequest", "FaceEmbeddingResponse",
    "FaceVerifyRequest", "FaceVerifyResponse",
    "ProctorFrameRequest", "ProctorFrameResponse",
]
```
app/schemas/chat.py (ADDED)
@@ -0,0 +1,27 @@

```python
from typing import Dict, Any, Optional
from pydantic import BaseModel
from enum import Enum


class Role(str, Enum):
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"


class Message(BaseModel):
    role: Role
    content: str


class ChatRequest(BaseModel):
    messages: list[Message]
    model: Optional[str] = None
    stream: bool = False
    temperature: float = 0.7


class ChatResponse(BaseModel):
    content: str
    model: str
    usage: Dict[str, Any]
```
app/schemas/common.py (ADDED)
@@ -0,0 +1,8 @@

```python
from typing import Optional
from pydantic import BaseModel


class ErrorResponse(BaseModel):
    error: str
    detail: Optional[str] = None
    request_id: str
```
app/schemas/exam.py (ADDED)
@@ -0,0 +1,31 @@

```python
from typing import List, Optional
from pydantic import BaseModel, Field
from enum import Enum


class QuestionType(str, Enum):
    MCQ = "mcq"
    TRUE_FALSE = "true_false"
    SHORT_ANSWER = "short_answer"


class ExamGenerateRequest(BaseModel):
    content: str = Field(..., description="Text or source to generate questions from")
    num_questions: int = Field(default=5, ge=1, le=20)
    difficulty: str = "medium"
    question_types: List[QuestionType] = [QuestionType.MCQ]


class QuestionSchema(BaseModel):
    id: str
    text: str
    type: QuestionType
    options: Optional[List[str]] = None
    answer: str
    explanation: Optional[str] = None


class ExamResponse(BaseModel):
    exam_id: str
    title: str
    questions: List[QuestionSchema]
```
app/schemas/proctoring.py (ADDED)
@@ -0,0 +1,128 @@

```python
"""
proctoring.py
-------------
Pydantic schemas for all AI Proctoring endpoints:
- Face Detection
- Object Detection
- Face Recognition (register + verify)
- Combined frame analysis
"""
from __future__ import annotations

from typing import List, Literal, Optional
from pydantic import BaseModel, Field


# ─────────────────────────────────────────────
# Shared sub-schemas
# ─────────────────────────────────────────────

class BoundingBox(BaseModel):
    x: int
    y: int
    width: int
    height: int


# ─────────────────────────────────────────────
# Face Detection
# ─────────────────────────────────────────────

class FaceDetectionRequest(BaseModel):
    image: str = Field(..., description="Base64-encoded BGR image")
    session_id: Optional[str] = Field(None, description="Exam session ID for tracking")


class FaceDetectionResponse(BaseModel):
    face_count: int = Field(..., description="Number of faces detected")
    status: Literal["no_face", "single_face", "multiple_faces"]
    bounding_box: Optional[BoundingBox] = Field(None, description="Bounding box of single detected face")
    face_image: Optional[str] = Field(None, description="Base64 cropped face image (only when 1 face found)")
    violation: bool = Field(..., description="True if face_count != 1")
    processing_time_ms: float


# ─────────────────────────────────────────────
# Object Detection
# ─────────────────────────────────────────────

class ObjectDetectionRequest(BaseModel):
    image: str = Field(..., description="Base64-encoded BGR image")
    detect_books: bool = Field(True, description="Set False for open-book exams")
    confidence_threshold: float = Field(0.50, ge=0.1, le=1.0,
                                        description="Minimum detection confidence")
    return_annotated: bool = Field(False, description="Return frame with drawn bounding boxes")
    session_id: Optional[str] = None


class ViolationItem(BaseModel):
    type: Literal["mobile_detected", "laptop_detected", "book_detected", "extra_person"]
    confidence: float = Field(..., ge=0.0, le=1.0)
    bounding_box: BoundingBox


class ObjectDetectionResponse(BaseModel):
    violations: List[ViolationItem]
    violation_count: int
    has_violations: bool
    annotated_image: Optional[str] = Field(None,
        description="Base64 image with drawn bounding boxes (only if return_annotated=True)")
    processing_time_ms: float


# ─────────────────────────────────────────────
# Face Recognition — Register
# ─────────────────────────────────────────────

class FaceEmbeddingRequest(BaseModel):
    images: List[str] = Field(..., min_length=1, max_length=10,
                              description="List of Base64 face images (ideally 5 poses)")
    student_id: str = Field(..., min_length=1, description="Unique student identifier")


class FaceEmbeddingResponse(BaseModel):
    student_id: str = Field(..., description="The unique identifier of the registered student.")
    embeddings_count: int = Field(..., description="Number of face images processed to create the embedding.")
    registered: bool = Field(..., description="Indicates if the registration was successful.")
    embedding: List[float] = Field(
        ...,
        description="The 512-dimensional face embedding vector generated by Facenet. "
                    "**IMPORTANT FOR .NET API:** Serialize this array as JSON or store it as a "
                    "`vector`/`double precision[]` in PostgreSQL, and provide it back to the "
                    "`/verify-face` endpoint during exams."
    )


# ─────────────────────────────────────────────
# Face Recognition — Verify
# ─────────────────────────────────────────────

class FaceVerifyRequest(BaseModel):
    image: str = Field(..., description="Base64 encoded live face image to verify (captured during the exam).")
    student_id: str = Field(..., description="The student ID to verify against.")


class FaceVerifyResponse(BaseModel):
    verified: bool = Field(..., description="True if the face matches the stored reference embedding within the allowed threshold.")
    distance: float = Field(..., description="The calculated Cosine distance between the live face and the reference embedding. (Lower is a closer match.)")
    threshold: float = Field(0.40, description="The maximum allowed distance threshold used for verification.")
    status: Literal["verified", "suspicious", "rejected"] = Field(..., description="The final decision status (verified < 0.40, suspicious < 0.60, rejected >= 0.60).")


# ─────────────────────────────────────────────
# Combined Frame Analysis
# ─────────────────────────────────────────────

class ProctorFrameRequest(BaseModel):
    image: str = Field(..., description="Base64-encoded webcam frame")
    student_id: str = Field(..., description="Student ID for face verification")
    detect_books: bool = Field(True, description="Set False for open-book exams")
    return_annotated: bool = Field(False, description="Return annotated image with violations drawn")
    session_id: Optional[str] = None


class ProctorFrameResponse(BaseModel):
    face_detection: FaceDetectionResponse
    object_detection: ObjectDetectionResponse
    face_verification: Optional[FaceVerifyResponse] = None
    total_violations: int = Field(..., description="Total number of violations detected")
    risk_level: Literal["safe", "warning", "critical"]
    processing_time_ms: float = Field(..., description="Total round-trip processing time")
```
app/services/chat_service.py (ADDED)
@@ -0,0 +1,69 @@

```python
import json
from typing import AsyncGenerator, Dict, Any
from groq import AsyncGroq
from loguru import logger

from app.schemas import ChatRequest, ChatResponse
from app.core.config import settings

class ChatService:
    def __init__(self):
        self.model = settings.DEFAULT_CHAT_MODEL
        self.api_key = settings.GROQ_API_KEY
        self._client = None

    @property
    def client(self):
        if not self.api_key or self.api_key == "gsk_...":
            raise ValueError("GROQ_API_KEY is missing. Please set it in your .env file.")
        if self._client is None:
            self._client = AsyncGroq(api_key=self.api_key)
        return self._client

    async def get_chat_response(self, request: ChatRequest) -> ChatResponse:
        """Non-streaming response logic"""
        model_to_use = request.model or self.model
        logger.info(f"Processing chat request with model: {model_to_use}")

        # Prepare messages format for Groq
        messages = [{"role": msg.role.value, "content": msg.content} for msg in request.messages]

        response = await self.client.chat.completions.create(
            model=model_to_use,
            messages=messages,
            temperature=request.temperature,
            stream=False
        )

        content = response.choices[0].message.content
        usage = response.usage.model_dump() if response.usage else {}

        return ChatResponse(
            content=content or "",
            model=response.model or model_to_use,
            usage=usage
        )

    async def stream_chat_response(self, request: ChatRequest) -> AsyncGenerator[str, None]:
        """Streaming response logic using SSE"""
        model_to_use = request.model or self.model
        logger.info(f"Starting stream for model: {model_to_use}")

        messages = [{"role": msg.role.value, "content": msg.content} for msg in request.messages]

        stream = await self.client.chat.completions.create(
            model=model_to_use,
            messages=messages,
            temperature=request.temperature,
            stream=True
        )

        async for chunk in stream:
            content = chunk.choices[0].delta.content
            if content is not None:
                data = {"content": content, "done": False}
                yield f"data: {json.dumps(data)}\n\n"

        yield f"data: {json.dumps({'content': '', 'done': True})}\n\n"

chat_service = ChatService()
```
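Each event yielded by `stream_chat_response` is a `data: {...}` line followed by a blank line, ending with a final `"done": true` event. A minimal consumer sketch, assuming a local server on port 8000 and `httpx` (which may not be in this commit's requirements.txt):

```python
# Minimal SSE consumer sketch for POST /api/v1/chat/stream.
# Assumes: local server on port 8000 and `pip install httpx`
# (httpx may not be listed in this commit's requirements.txt).
import json
import httpx

payload = {"messages": [{"role": "user", "content": "Explain photosynthesis."}]}

with httpx.stream("POST", "http://localhost:8000/api/v1/chat/stream",
                  json=payload, timeout=None) as response:
    for line in response.iter_lines():
        if not line.startswith("data: "):
            continue  # skip blank separator lines
        event = json.loads(line[len("data: "):])
        if event["done"]:
            break
        print(event["content"], end="", flush=True)
```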
app/services/exam_service.py (ADDED)
@@ -0,0 +1,87 @@

```python
import uuid
import json
from loguru import logger
from groq import AsyncGroq

from app.schemas import ExamGenerateRequest, ExamResponse, QuestionSchema
from app.core.config import settings

class ExamService:
    def __init__(self):
        # We enforce a Groq model for this feature to utilize JSON mode easily
        self.model = "llama3-70b-8192"
        self.api_key = settings.GROQ_API_KEY
        self._client = None

    @property
    def client(self):
        if not self.api_key or self.api_key == "gsk_...":
            raise ValueError("GROQ_API_KEY is missing. Please set it in your .env file.")
        if self._client is None:
            self._client = AsyncGroq(api_key=self.api_key)
        return self._client

    async def generate_exam(self, request: ExamGenerateRequest) -> ExamResponse:
        logger.info(f"Generating {request.num_questions} questions via Groq JSON mode logic.")

        allowed_types = [qt.value for qt in request.question_types]

        system_prompt = f"""
You are an expert academic examiner. Your task is to extract information from the user's provided content and generate exactly {request.num_questions} questions.
The difficulty level should be: {request.difficulty}.
Allowed question types: {allowed_types}.

IMPORTANT: Your output must be a pure JSON object using the following schema:
{{
    "title": "A short descriptive title for the generated exam",
    "questions": [
        {{
            "text": "The question text",
            "type": "mcq" or "true_false" or "short_answer",
            "options": ["List", "of", "Choices"], // ONLY include this if type is mcq
            "answer": "The correct answer exactly as extracted/written",
            "explanation": "Brief explanation of why this answer is correct"
        }}
    ]
}}
"""

        try:
            response = await self.client.chat.completions.create(
                model=self.model,
                messages=[
                    {"role": "system", "content": system_prompt},
                    {"role": "user", "content": f"Content to generate questions from:\n\n{request.content}"}
                ],
                response_format={"type": "json_object"},
                temperature=0.3
            )

            raw_output = response.choices[0].message.content
            parsed = json.loads(raw_output)

            questions = []
            for q in parsed.get("questions", []):
                questions.append(QuestionSchema(
                    id=str(uuid.uuid4())[:8],
                    text=q.get("text", "Error generating text"),
                    type=q.get("type", "short_answer"),
                    options=q.get("options"),
                    answer=q.get("answer", "No answer provided"),
                    explanation=q.get("explanation")
                ))

            return ExamResponse(
                exam_id=str(uuid.uuid4()),
                title=parsed.get("title", "AI Generated Exam"),
                questions=questions
            )

        except json.JSONDecodeError as je:
            logger.error(f"Failed to parse JSON from Groq: {je}")
            raise ValueError("The AI did not return a valid JSON structure.")
        except Exception as e:
            logger.error(f"Failed to generate exam via Groq: {e}")
            raise

exam_service = ExamService()
```
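A minimal async driver for the service, assuming `GROQ_API_KEY` is set in `.env`; the study content here is placeholder text:

```python
# Minimal driver sketch for ExamService (assumes GROQ_API_KEY is set in .env).
import asyncio

from app.schemas import ExamGenerateRequest
from app.services.exam_service import exam_service

async def main():
    request = ExamGenerateRequest(
        content="Photosynthesis converts light energy into chemical energy...",
        num_questions=3,
        difficulty="easy",
    )
    exam = await exam_service.generate_exam(request)
    print(exam.title)
    for q in exam.questions:
        print(f"[{q.type.value}] {q.text} -> {q.answer}")

asyncio.run(main())
```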
app/services/face_detection_service.py (ADDED)
@@ -0,0 +1,72 @@

```python
import asyncio
import time
import cv2
import numpy as np
from loguru import logger
from app.schemas.proctoring import FaceDetectionResponse, BoundingBox
from app.utils.image_utils import encode_image_to_base64

class FaceDetectionService:
    def __init__(self):
        self._detector = None

    def load_model(self, model_path: str):
        """Called once during app startup via lifespan"""
        self._detector = cv2.FaceDetectorYN.create(model_path, "", (320, 320))
        logger.info(f"YuNet face detector loaded: {model_path}")

    async def detect(self, image: np.ndarray) -> FaceDetectionResponse:
        """Run face detection on a single frame"""
        # Run in threadpool since OpenCV is sync/CPU-bound
        return await asyncio.to_thread(self._detect_sync, image)

    def _detect_sync(self, image: np.ndarray) -> FaceDetectionResponse:
        start_time = time.perf_counter()
        h, w = image.shape[:2]
        self._detector.setInputSize((w, h))

        _, faces = self._detector.detect(image)

        processing_time_ms = (time.perf_counter() - start_time) * 1000

        if faces is None:
            return FaceDetectionResponse(
                face_count=0,
                status="no_face",
                violation=True,
                processing_time_ms=processing_time_ms
            )

        face_count = len(faces)
        if face_count > 1:
            return FaceDetectionResponse(
                face_count=face_count,
                status="multiple_faces",
                violation=True,
                processing_time_ms=processing_time_ms
            )

        # face_count == 1 -> extract bbox and crop face
        x, y, w_box, h_box = faces[0][:4].astype(int)

        # Ensure bounding box is within image bounds
        x = max(0, x)
        y = max(0, y)
        w_box = min(w - x, w_box)
        h_box = min(h - y, h_box)

        face_img = image[y:y+h_box, x:x+w_box]
        face_b64 = encode_image_to_base64(face_img)
        bbox = BoundingBox(x=x, y=y, width=w_box, height=h_box)

        return FaceDetectionResponse(
            face_count=1,
            status="single_face",
            bounding_box=bbox,
            face_image=face_b64,
            violation=False,
            processing_time_ms=processing_time_ms
        )

# Singleton instance
face_detection_service = FaceDetectionService()
```
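A standalone exercise of the detector, assuming the YuNet ONNX file already exists at the path configured in `app/core/config.py` (the Dockerfile downloads it at build time) and that `test.jpg` is any local image:

```python
# Standalone sketch: load the YuNet model and run one detection.
# Assumes the ONNX file exists at the configured path and test.jpg is local.
import asyncio
import cv2

from app.core.config import settings
from app.services.face_detection_service import face_detection_service

async def main():
    face_detection_service.load_model(settings.FACE_DETECTION_MODEL_PATH)
    image = cv2.imread("test.jpg")  # BGR ndarray, as the service expects
    result = await face_detection_service.detect(image)
    print(result.status, result.face_count, f"{result.processing_time_ms:.1f} ms")

asyncio.run(main())
```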
app/services/face_recognition_service.py
ADDED
|
@@ -0,0 +1,153 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import asyncio
from typing import List

import numpy as np
from deepface import DeepFace
from scipy.spatial.distance import cosine
from loguru import logger

from app.schemas.proctoring import FaceEmbeddingResponse, FaceVerifyResponse


class FaceRecognitionService:
    """
    Face Recognition Service - Stateless Architecture.

    This service is a pure analysis engine:
    1. Register: takes an image -> extracts a numeric embedding -> returns the numbers to the .NET API to persist.
    2. Verify: takes a new image + the stored embedding (sent by .NET) -> compares them -> returns the result.

    ⚠️ This service writes nothing to disk; all storage is the responsibility of the .NET API.
    """

    def __init__(self):
        # Embedding model (Google's Facenet - high accuracy)
        self._model_name = "Facenet"

        # Threshold: a distance below 0.40 means the same person (verified).
        # Facenet cosine distance default threshold
        self._threshold = 0.40

        # A distance between 0.40 and 0.60 is suspicious;
        # anything above 0.60 is a different person (rejected).
        self._hard_reject = 0.60

    # ─────────────────────────────────────────────
    # Registration: extract the embedding and return it to .NET
    # ─────────────────────────────────────────────

    async def register_student(self, student_id: str, face_images: List[np.ndarray]) -> FaceEmbeddingResponse:
        """
        Register a student (async).
        The model runs in a separate thread so the server does not block.
        """
        return await asyncio.to_thread(self._register_sync, student_id, face_images)

    def _register_sync(self, student_id: str, face_images: List[np.ndarray]) -> FaceEmbeddingResponse:
        """
        The actual registration work:
        1. Take each image.
        2. Extract an embedding from it.
        3. Return the numbers to .NET to persist in PostgreSQL.
        ⚠️ No files are saved to disk.
        """
        embeddings = []
        last_error = "Unknown Error"

        for img in face_images:
            try:
                emb = self._get_embedding(img)
                embeddings.append(emb)
            except Exception as e:
                last_error = str(e)
                logger.exception(f"Failed to get embedding during registration: {e}")

        if not embeddings:
            raise ValueError(f"Could not extract any embeddings for student {student_id}. Reason: {last_error}")

        # Average all embeddings to produce a single unified signature
        avg_embedding = np.mean(embeddings, axis=0)

        return FaceEmbeddingResponse(
            student_id=student_id,
            embeddings_count=len(embeddings),
            registered=True,
            embedding=avg_embedding.tolist()  # ← the numbers the .NET API will persist
        )

    # ─────────────────────────────────────────────
    # Verification: compare a new image against a stored embedding
    # ─────────────────────────────────────────────

    async def verify_face(
        self,
        student_id: str,
        face_image: np.ndarray,
        reference_embedding: List[float]
    ) -> FaceVerifyResponse:
        """
        Verify a face (async).
        The .NET API sends the new image plus the stored reference embedding.
        """
        return await asyncio.to_thread(
            self._verify_sync, student_id, face_image, reference_embedding
        )

    def _verify_sync(
        self,
        student_id: str,
        face_image: np.ndarray,
        reference_embedding: List[float]
    ) -> FaceVerifyResponse:
        """
        The actual verification work:
        1. Extract the embedding from the new (live) image.
        2. Compare it against the reference embedding sent by .NET.
        3. Compute the cosine distance and decide the outcome.
        ⚠️ Nothing is read from disk.
        """
        try:
            live_emb = self._get_embedding(face_image)
        except Exception as e:
            logger.error(f"Failed to get embedding during verification: {e}")
            raise ValueError("Failed to extract embedding from the live image")

        # Convert the embedding sent by .NET to a numpy array
        stored_emb = np.array(reference_embedding)

        # Compute the distance (dissimilarity) between the two images
        distance = cosine(live_emb, stored_emb)

        # Decide the outcome based on the thresholds
        if distance < self._threshold:
            status = "verified"    # same student ✅
        elif distance < self._hard_reject:
            status = "suspicious"  # suspicious ⚠️
        else:
            status = "rejected"    # different person ❌

        return FaceVerifyResponse(
            verified=(status == "verified"),
            distance=round(float(distance), 4),
            status=status,
            threshold=self._threshold
        )

    # ─────────────────────────────────────────────
    # Core: extract the embedding from an image
    # ─────────────────────────────────────────────

    def _get_embedding(self, face_img: np.ndarray) -> np.ndarray:
        """
        Core helper: calls DeepFace to extract the embedding.
        The embedding is a vector of 128 floats (Facenet) representing the facial features.
        """
        result = DeepFace.represent(
            face_img,
            model_name=self._model_name,
            enforce_detection=False
        )
        return np.array(result[0]["embedding"])


face_recognition_service = FaceRecognitionService()
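To make the stateless contract concrete, an illustrative round trip, not part of this commit; the plain dict stands in for the PostgreSQL storage owned by the .NET API:

# Illustrative round trip for the stateless register/verify contract.
import asyncio
import numpy as np

from app.services.face_recognition_service import face_recognition_service


async def demo(reg_images: list[np.ndarray], live_image: np.ndarray) -> None:
    storage: dict[str, list[float]] = {}  # stand-in for the .NET/PostgreSQL side

    # 1) Registration: the service returns the embedding; the caller persists it.
    reg = await face_recognition_service.register_student("student-42", reg_images)
    storage["student-42"] = reg.embedding

    # 2) Verification: the caller sends the live frame plus the stored embedding.
    res = await face_recognition_service.verify_face(
        "student-42", live_image, storage["student-42"]
    )
    print(res.status, res.distance)  # "verified" when the distance is below 0.40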
app/services/object_detection_service.py
ADDED
@@ -0,0 +1,181 @@
import asyncio
import time
import os
from typing import List

import cv2
import numpy as np
import onnxruntime as ort
from loguru import logger

from app.schemas.proctoring import ObjectDetectionResponse, ViolationItem, BoundingBox
from app.utils.image_utils import encode_image_to_base64

# COCO class ids relevant to exam proctoring
EXAM_CLASSES: dict[int, str] = {
    0: "extra_person",
    63: "laptop_detected",
    67: "mobile_detected",
    73: "book_detected",
}

# BGR colors used when drawing annotated frames
VIOLATION_COLORS: dict[str, tuple] = {
    "extra_person": (0, 0, 255),       # red
    "laptop_detected": (255, 200, 0),  # cyan
    "mobile_detected": (0, 140, 255),  # orange
    "book_detected": (255, 0, 180),    # purple
}

INPUT_SIZE = 640
NMS_THRESHOLD = 0.45


class ObjectDetectionService:
    def __init__(self):
        self._session: ort.InferenceSession | None = None

    def load_model(self, model_path: str):
        """Load the YOLOv8 ONNX session during startup."""
        if not os.path.exists(model_path):
            raise FileNotFoundError(f"ONNX model not found: {model_path}")

        self._session = ort.InferenceSession(
            model_path,
            providers=["CPUExecutionProvider"],
        )
        logger.info(f"YOLOv8 ONNX session ready: {model_path}")

    async def detect(self, image: np.ndarray,
                     detect_books: bool = True,
                     conf_threshold: float = 0.50,
                     return_annotated: bool = False
                     ) -> ObjectDetectionResponse:
        """Run object detection — delegates to the threadpool."""
        return await asyncio.to_thread(
            self._detect_sync, image, detect_books, conf_threshold, return_annotated
        )

    def _preprocess(self, frame: np.ndarray) -> tuple[np.ndarray, float, int, int]:
        """Letterbox the frame to INPUT_SIZE x INPUT_SIZE, preserving aspect ratio."""
        h, w = frame.shape[:2]
        scale = INPUT_SIZE / max(h, w)
        new_w, new_h = int(w * scale), int(h * scale)
        resized = cv2.resize(frame, (new_w, new_h))

        pad_x = (INPUT_SIZE - new_w) // 2
        pad_y = (INPUT_SIZE - new_h) // 2
        canvas = np.zeros((INPUT_SIZE, INPUT_SIZE, 3), dtype=np.uint8)
        canvas[pad_y:pad_y + new_h, pad_x:pad_x + new_w] = resized

        # HWC BGR -> CHW RGB -> NCHW float32
        tensor = canvas[:, :, ::-1].transpose(2, 0, 1).astype(np.float32) / 255.0
        tensor = tensor[np.newaxis]  # [1, 3, H, W]
        return tensor, scale, pad_x, pad_y

    def _detect_sync(self, image: np.ndarray, detect_books: bool,
                     conf_threshold: float, return_annotated: bool) -> ObjectDetectionResponse:
        if self._session is None:
            raise RuntimeError("ONNX session not initialized; call load_model() first.")

        start_time = time.perf_counter()

        input_name = self._session.get_inputs()[0].name
        tensor, scale, pad_x, pad_y = self._preprocess(image)
        outputs = self._session.run(None, {input_name: tensor})
        preds = outputs[0][0]  # [84, N]

        boxes_raw, scores_raw, class_ids_raw = [], [], []
        person_count = 0

        num_detections = preds.shape[1]
        for i in range(num_detections):
            col = preds[:, i]
            class_confs = col[4:]
            class_id = int(np.argmax(class_confs))
            confidence = float(class_confs[class_id])

            if confidence < conf_threshold:
                continue
            if class_id not in EXAM_CLASSES:
                continue
            if EXAM_CLASSES[class_id] == "book_detected" and not detect_books:
                continue

            # Map the letterboxed center-format box back to original image coordinates
            cx = (col[0] - pad_x) / scale
            cy = (col[1] - pad_y) / scale
            bw = col[2] / scale
            bh = col[3] / scale
            x = int(cx - bw / 2)
            y = int(cy - bh / 2)

            boxes_raw.append([x, y, int(bw), int(bh)])
            scores_raw.append(confidence)
            class_ids_raw.append(class_id)

        violations: List[ViolationItem] = []
        annotated_image = None

        if boxes_raw:
            indices = cv2.dnn.NMSBoxes(boxes_raw, scores_raw, conf_threshold, NMS_THRESHOLD)

            if len(indices) > 0:
                for idx in indices.flatten():
                    cid = class_ids_raw[idx]
                    vtype = EXAM_CLASSES[cid]
                    conf = round(scores_raw[idx], 3)

                    # Prevent negative box coordinates
                    bx, by, bw, bh = boxes_raw[idx]
                    bx = max(0, bx)
                    by = max(0, by)

                    bbox = BoundingBox(x=bx, y=by, width=bw, height=bh)

                    # People are counted rather than flagged per box: one person is expected.
                    if vtype == "extra_person":
                        person_count += 1
                        continue

                    violations.append(ViolationItem(type=vtype, confidence=conf, bounding_box=bbox))

        if person_count >= 2:
            violations.append(ViolationItem(
                type="extra_person",
                confidence=1.0,
                bounding_box=BoundingBox(x=0, y=0, width=0, height=0)
            ))

        processing_time_ms = (time.perf_counter() - start_time) * 1000

        if return_annotated and (violations or person_count >= 2):
            annotated_frame = self._draw_violations(image.copy(), violations, person_count)
            annotated_image = encode_image_to_base64(annotated_frame)

        return ObjectDetectionResponse(
            violations=violations,
            violation_count=len(violations),
            has_violations=bool(violations),
            annotated_image=annotated_image,
            processing_time_ms=processing_time_ms
        )

    def _draw_violations(self, frame: np.ndarray, violations: List[ViolationItem], person_count: int) -> np.ndarray:
        for v in violations:
            if v.type == "extra_person":
                continue  # no specific box for an extra person; handled by the full-frame warning below

            x, y, w, h = v.bounding_box.x, v.bounding_box.y, v.bounding_box.width, v.bounding_box.height
            color = VIOLATION_COLORS.get(v.type, (0, 0, 255))
            label = f"{v.type} {v.confidence:.0%}"
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.putText(
                frame, label,
                (x, max(y - 8, 16)),
                cv2.FONT_HERSHEY_SIMPLEX, 0.6, color, 2,
            )

        if person_count >= 2:
            cv2.putText(
                frame, "EXTRA PERSON DETECTED", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 0.8, (0, 0, 255), 2
            )

        return frame


object_detection_service = ObjectDetectionService()
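A caller sketch for this service, again illustrative rather than part of the commit. load_model is normally invoked once from the lifespan hook in main.py; the frame here is read from disk only for demonstration:

# Illustrative caller sketch -- not part of this commit.
import asyncio
import cv2

from app.core.config import settings
from app.services.object_detection_service import object_detection_service

object_detection_service.load_model(settings.OBJECT_DETECTION_MODEL_PATH)


async def scan(path: str) -> None:
    frame = cv2.imread(path)  # BGR frame; None if the file is missing
    result = await object_detection_service.detect(
        frame,
        detect_books=True,      # set False to ignore COCO class 73
        conf_threshold=0.50,
        return_annotated=True,  # also returns a base64 frame with boxes drawn
    )
    for v in result.violations:
        print(v.type, v.confidence, v.bounding_box)


asyncio.run(scan("webcam_frame.jpg"))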
app/utils/__init__.py
ADDED
@@ -0,0 +1 @@
# Utils package
app/utils/image_utils.py
ADDED
@@ -0,0 +1,125 @@
"""
image_utils.py
--------------
Utilities for converting between Base64 strings and OpenCV BGR numpy arrays.
Used by all proctoring services to decode incoming API images.
"""
import base64
import binascii
import re

import cv2
import numpy as np


# ─────────────────────────────────────────────
# Public API
# ─────────────────────────────────────────────

def decode_base64_image(b64_str: str) -> np.ndarray:
    """
    Decode a Base64-encoded image string into a BGR numpy array.

    Accepts:
        - Raw base64 string
        - Data URI: ``data:image/jpeg;base64,<data>``

    Returns:
        np.ndarray: BGR image (OpenCV format)

    Raises:
        ValueError: If the string is not valid base64 or not a valid image.
    """
    if not b64_str:
        raise ValueError("Image string is empty.")

    # Strip data URI prefix if present (e.g. "data:image/png;base64,").
    # Dots and hyphens are allowed so MIME subtypes like image/x-ms-bmp match.
    if b64_str.startswith("data:"):
        match = re.match(r"data:[a-zA-Z0-9.+/-]+;base64,(.+)", b64_str, re.DOTALL)
        if not match:
            raise ValueError("Invalid data URI format.")
        b64_str = match.group(1)

    # Decode base64
    try:
        img_bytes = base64.b64decode(b64_str, validate=True)
    except (binascii.Error, ValueError) as exc:
        raise ValueError(f"Invalid base64 encoding: {exc}") from exc

    # Decode image bytes
    img_array = np.frombuffer(img_bytes, dtype=np.uint8)
    image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)

    if image is None:
        raise ValueError("Could not decode image from provided bytes. "
                         "Ensure the image is a valid JPEG, PNG, or WebP.")
    return image


def decode_image_from_bytes(img_bytes: bytes) -> np.ndarray:
    """
    Decode raw image bytes into a BGR numpy array.
    """
    img_array = np.frombuffer(img_bytes, dtype=np.uint8)
    image = cv2.imdecode(img_array, cv2.IMREAD_COLOR)
    if image is None:
        raise ValueError("Could not decode image from provided bytes.")
    return image


def encode_image_to_base64(image: np.ndarray, fmt: str = ".jpg",
                           quality: int = 85) -> str:
    """
    Encode a BGR numpy array to a Base64 string.

    Args:
        image: BGR numpy array.
        fmt: Output format extension (".jpg", ".png", ".webp").
        quality: JPEG/WebP compression quality (0-100).

    Returns:
        str: Base64-encoded image string (no data URI prefix).

    Raises:
        ValueError: If encoding fails.
    """
    encode_params: list = []
    if fmt in (".jpg", ".jpeg"):
        encode_params = [cv2.IMWRITE_JPEG_QUALITY, quality]
    elif fmt == ".webp":
        encode_params = [cv2.IMWRITE_WEBP_QUALITY, quality]

    ok, buffer = cv2.imencode(fmt, image, encode_params)
    if not ok:
        raise ValueError(f"Failed to encode image as {fmt}.")

    return base64.b64encode(buffer.tobytes()).decode("utf-8")


def validate_and_resize(
    image: np.ndarray,
    max_width: int = 1920,
    max_height: int = 1080,
    min_width: int = 32,
    min_height: int = 32,
) -> np.ndarray:
    """
    Validate image dimensions and resize if it exceeds the max size.

    Raises:
        ValueError: If image is smaller than minimum dimensions.
    """
    h, w = image.shape[:2]

    if w < min_width or h < min_height:
        raise ValueError(
            f"Image too small ({w}x{h}). Minimum is {min_width}x{min_height}."
        )

    if w > max_width or h > max_height:
        scale = min(max_width / w, max_height / h)
        new_w = int(w * scale)
        new_h = int(h * scale)
        image = cv2.resize(image, (new_w, new_h), interpolation=cv2.INTER_AREA)

    return image
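A quick self-contained round-trip check of these helpers; PNG encoding is lossless, so the equality assertion holds exactly:

# Round-trip sketch for the helpers above (illustrative only).
import numpy as np

from app.utils.image_utils import (
    decode_base64_image,
    encode_image_to_base64,
    validate_and_resize,
)

frame = np.zeros((2160, 3840, 3), dtype=np.uint8)  # oversized 4K frame
frame = validate_and_resize(frame)                 # scaled by 0.5 to fit 1920x1080
assert frame.shape[:2] == (1080, 1920)

b64 = encode_image_to_base64(frame, fmt=".png")    # lossless round trip
restored = decode_base64_image(b64)
assert np.array_equal(restored, frame)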
main.py
ADDED
@@ -0,0 +1,88 @@
import time
from contextlib import asynccontextmanager

from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from fastapi.middleware.gzip import GZipMiddleware
from fastapi.responses import JSONResponse
from loguru import logger

from app.api.router import api_router
from app.core.config import settings
from app.core.logging_config import setup_logging, RequestLoggingMiddleware


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: load ML models and init clients
    logger.info("--- Starting up Examify AI Services ---")

    from app.services.face_detection_service import face_detection_service
    from app.services.object_detection_service import object_detection_service

    # Load models (face recognition is stateless - no initialization needed)
    face_detection_service.load_model(settings.FACE_DETECTION_MODEL_PATH)
    object_detection_service.load_model(settings.OBJECT_DETECTION_MODEL_PATH)

    app.state.models = {
        "face_detection": "loaded",
        "object_detection": "loaded",
        "face_recognition": "stateless (no local storage)"
    }
    logger.info("All ML models loaded successfully (face recognition is stateless)")

    yield

    # Shutdown: cleanup
    logger.info("--- Shutting down Examify AI Services ---")
    app.state.models.clear()


def create_app() -> FastAPI:
    app = FastAPI(
        title=settings.APP_NAME,
        version="1.0.0",
        lifespan=lifespan,
        docs_url="/docs",
        redoc_url="/redoc",
    )

    # Setup logging
    setup_logging()

    # Add middlewares
    app.add_middleware(
        CORSMiddleware,
        allow_origins=settings.CORS_ORIGINS,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

    app.add_middleware(GZipMiddleware, minimum_size=settings.GZIP_MIN_SIZE)
    app.add_middleware(RequestLoggingMiddleware)

    # Exception handlers
    @app.exception_handler(Exception)
    async def global_exception_handler(request: Request, exc: Exception):
        logger.error(f"Global Error: {str(exc)}")
        return JSONResponse(
            status_code=500,
            content={
                "error": "Internal Server Error",
                "detail": str(exc) if settings.DEBUG else None,
                "request_id": request.headers.get("X-Request-ID", "unknown")
            }
        )

    # Include routes
    app.include_router(api_router, prefix=settings.API_V1_STR)

    @app.get("/health", tags=["Health"])
    async def health_check():
        return {"status": "healthy", "timestamp": time.time()}

    return app


app = create_app()

if __name__ == "__main__":
    import uvicorn
    uvicorn.run("main:app", host="0.0.0.0", port=8000, reload=True)
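A minimal in-process smoke test of the assembled app, illustrative only. Entering the TestClient context runs the lifespan hook, so the ONNX model files must exist locally for this to pass:

# In-process smoke test sketch using FastAPI's TestClient.
from fastapi.testclient import TestClient

from main import app

with TestClient(app) as client:  # triggers the lifespan hook (loads models)
    resp = client.get("/health")
    assert resp.status_code == 200
    assert resp.json()["status"] == "healthy"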
requirements.txt
ADDED
@@ -0,0 +1,16 @@
fastapi[all]>=0.110.0
pydantic-settings>=2.2.1
python-dotenv>=1.0.1
loguru>=0.7.2
python-multipart>=0.0.9
uvicorn>=0.29.0
# === AI Providers ===
groq>=0.5.0

# === AI Proctoring ===
opencv-python-headless>=4.9.0
onnxruntime>=1.17.0
numpy>=1.26.0
deepface>=0.0.89
tf-keras>=2.16.0
scipy>=1.12.0