Kaadan commited on
Commit
358dfff
·
1 Parent(s): a85513d

initial commit

Browse files
This view is limited to 50 files because it contains too many changes.   See raw diff
Files changed (50) hide show
  1. backend/.env.example +34 -0
  2. backend/.gitignore +307 -0
  3. backend/DOCKER_README.md +94 -0
  4. backend/Dockerfile +32 -0
  5. backend/QWEN.md +240 -0
  6. backend/README.md +41 -0
  7. backend/alembic.ini +149 -0
  8. backend/alembic/README +1 -0
  9. backend/alembic/env.py +83 -0
  10. backend/alembic/script.py.mako +28 -0
  11. backend/alembic/versions/1172a2fbb171_initial_migration_for_assessment_.py +117 -0
  12. backend/alembic/versions/91905f51740d_final_structure_for_technical_.py +32 -0
  13. backend/alembic/versions/9facd9b60600_seed_demo_data_with_hr_and_candidate_.py +396 -0
  14. backend/api/__init__.py +0 -0
  15. backend/api/application_routes.py +88 -0
  16. backend/api/assessment_routes.py +148 -0
  17. backend/api/job_routes.py +127 -0
  18. backend/api/routes.py +29 -0
  19. backend/api/user_routes.py +76 -0
  20. backend/config.py +38 -0
  21. backend/database/__init__.py +0 -0
  22. backend/database/database.py +26 -0
  23. backend/docker-compose.yml +33 -0
  24. backend/integrations/ai_integration/ai_factory.py +78 -0
  25. backend/integrations/ai_integration/ai_generator_interface.py +57 -0
  26. backend/integrations/ai_integration/anthropic_generator.py +39 -0
  27. backend/integrations/ai_integration/google_ai_generator.py +39 -0
  28. backend/integrations/ai_integration/mock_ai_generator.py +378 -0
  29. backend/integrations/ai_integration/openai_generator.py +39 -0
  30. backend/logging_config.py +37 -0
  31. backend/main.py +54 -0
  32. backend/models/__init__.py +7 -0
  33. backend/models/application.py +12 -0
  34. backend/models/assessment.py +61 -0
  35. backend/models/base.py +3 -0
  36. backend/models/job.py +31 -0
  37. backend/models/user.py +42 -0
  38. backend/requirements-dev.txt +6 -0
  39. backend/requirements-test.txt +6 -0
  40. backend/requirements.md +98 -0
  41. backend/requirements.txt +0 -0
  42. backend/run_tests.py +36 -0
  43. backend/schemas/__init__.py +11 -0
  44. backend/schemas/application.py +67 -0
  45. backend/schemas/assessment.py +56 -0
  46. backend/schemas/base.py +5 -0
  47. backend/schemas/candidate_assessment.py +28 -0
  48. backend/schemas/enums.py +21 -0
  49. backend/schemas/job.py +35 -0
  50. backend/schemas/question.py +30 -0
backend/.env.example ADDED
@@ -0,0 +1,34 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Database Configuration
2
+ DATABASE_URL=sqlite:///./assessment_platform.db
3
+
4
+ # Server Configuration
5
+ HOST=0.0.0.0
6
+ PORT=8000
7
+ DEBUG=False
8
+
9
+ # Logging Configuration
10
+ LOG_LEVEL=INFO
11
+ LOG_FILE=app.log
12
+ LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
13
+
14
+ # JWT Configuration (for future use)
15
+ SECRET_KEY=your-secret-key-here
16
+ ALGORITHM=HS256
17
+ ACCESS_TOKEN_EXPIRE_MINUTES=30
18
+
19
+ # Application Configuration
20
+ APP_NAME=AI-Powered Hiring Assessment Platform
21
+ APP_VERSION=0.1.0
22
+ APP_DESCRIPTION=MVP for managing hiring assessments using AI
23
+
24
+ # AI Provider Configuration (for future use)
25
+ OPENAI_API_KEY=
26
+ ANTHROPIC_API_KEY=
27
+ GOOGLE_AI_API_KEY=
28
+
29
+ # Email Configuration (for future use)
30
+ SMTP_SERVER=
31
+ SMTP_PORT=
32
+ SMTP_USERNAME=
33
+ SMTP_PASSWORD=
34
+ FROM_EMAIL=
backend/.gitignore ADDED
@@ -0,0 +1,307 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Byte-compiled / optimized / DLL files
2
+ __pycache__/
3
+ *.py[cod]
4
+ *$py.class
5
+ *.so
6
+
7
+ # Distribution / packaging
8
+ .Python
9
+ build/
10
+ develop-eggs/
11
+ dist/
12
+ downloads/
13
+ eggs/
14
+ .eggs/
15
+ lib/
16
+ lib64/
17
+ parts/
18
+ sdist/
19
+ var/
20
+ wheels/
21
+ *.egg-info/
22
+ .installed.cfg
23
+ *.egg
24
+
25
+ # PyInstaller
26
+ *.manifest
27
+ *.spec
28
+
29
+ # Installer logs
30
+ pip-log.txt
31
+ pip-delete-this-directory.txt
32
+
33
+ # Unit test / coverage reports
34
+ htmlcov/
35
+ .tox/
36
+ .nox/
37
+ .coverage
38
+ .coverage.*
39
+ .cache
40
+ nosetests.xml
41
+ coverage.xml
42
+ *.cover
43
+ *.py,cover
44
+ .hypothesis/
45
+ .pytest_cache/
46
+
47
+ # Translations
48
+ *.mo
49
+ *.pot
50
+
51
+ # Django stuff:
52
+ *.log
53
+ local_settings.py
54
+ db.sqlite3
55
+ db.sqlite3-journal
56
+
57
+ # Flask stuff:
58
+ instance/
59
+ .webassets-cache
60
+
61
+ # Scrapy stuff:
62
+ .scrapy
63
+
64
+ # Sphinx documentation docs/
65
+ docs/_build/
66
+
67
+ # PyBuilder
68
+ target/
69
+
70
+ # Jupyter Notebook
71
+ .ipynb_checkpoints
72
+
73
+ # IPython
74
+ profile_default/
75
+ ipython_config.py
76
+
77
+ # pyenv
78
+ .python-version
79
+
80
+ # pipenv
81
+ Pipfile.lock
82
+
83
+ # PEP 582
84
+ __pypackages__/
85
+
86
+ # Celery stuff
87
+ celerybeat-schedule
88
+ celerybeat.pid
89
+
90
+ # SageMath parsed files
91
+ *.sage.py
92
+
93
+ # Environments
94
+ .env
95
+ .venv
96
+ env/
97
+ venv/
98
+ ENV/
99
+ env.bak/
100
+ venv.bak/
101
+
102
+ # Spyder project settings
103
+ .spyderproject
104
+ .spyproject
105
+
106
+ # Rope project settings
107
+ .ropeproject
108
+
109
+ # mkdocs documentation
110
+ /site
111
+
112
+ # mypy
113
+ .mypy_cache/
114
+ .dmypy.json
115
+ dmypy.json
116
+
117
+ # Pyre type checker
118
+ .pyre/
119
+
120
+ # IDE
121
+ .vscode/
122
+ .idea/
123
+ *.swp
124
+ *.swo
125
+ *~
126
+
127
+ # OS generated files
128
+ .DS_Store
129
+ .DS_Store?
130
+ ._*
131
+ .Spotlight-V100
132
+ .Trashes
133
+ ehthumbs.db
134
+ Thumbs.db
135
+ desktop.ini
136
+
137
+ # Database
138
+ *.db
139
+ *.db-journal
140
+
141
+ # Local SQLite database
142
+ assessment_platform.db
143
+ test_assessment_platform.db
144
+
145
+ # Environment variables
146
+ .env
147
+
148
+ # Logs
149
+ logs/
150
+ *.log
151
+
152
+ # Coverage
153
+ .coverage*
154
+ htmlcov/
155
+
156
+ # Pytest
157
+ .pytest_cache/
158
+ __pycache__/
159
+
160
+ # Alembic
161
+ alembic/versions/*.pyc
162
+
163
+ # FastAPI docs
164
+ docs/
165
+
166
+ # Temporary files
167
+ *.tmp
168
+ *.temp
169
+
170
+ # Python virtual environments (specific to this project)
171
+ .venv/
172
+ venv/
173
+
174
+ # Local config files
175
+ config_local.py
176
+ local_settings.py
177
+
178
+ # Sensitive files
179
+ secrets.json
180
+ credentials.json
181
+ *.pem
182
+ *.key
183
+ *.crt
184
+ *.cert
185
+
186
+ # Cache directories
187
+ __pycache__/
188
+ *.pyc
189
+ *$py.class
190
+ *.so
191
+ .Python
192
+ jinja2/
193
+ webassets/
194
+ .sass-cache
195
+
196
+ # IDE specific files
197
+ .vscode/
198
+ .idea/
199
+ *.tmproj
200
+ *.sublime-project
201
+ *.sublime-workspace
202
+
203
+ # Testing
204
+ .pytest_cache/
205
+ .coverage
206
+ htmlcov/
207
+ .cov/
208
+ .coverage.*
209
+
210
+ # macOS
211
+ .DS_Store
212
+ .AppleDouble
213
+ .LSOverride
214
+ Icon?
215
+ **/.DS_Store
216
+ ._*
217
+ .Spotlight-V100
218
+ .Trashes
219
+ ehthumbs.db
220
+ Thumbs.db
221
+
222
+ # Windows
223
+ [Ee]xpress
224
+ [Dd]esktop.ini
225
+ $RECYCLE.BIN/
226
+ *.cab
227
+ *.msi
228
+ *.msix
229
+ *.msc
230
+ Thumbs.db
231
+ ehthumbs.db
232
+ Desktop.ini
233
+ *.lnk
234
+
235
+ # Python
236
+ *.py[cod]
237
+ *$py.class
238
+ *.so
239
+ .Python
240
+ build/
241
+ develop-eggs/
242
+ dist/
243
+ downloads/
244
+ eggs/
245
+ .eggs/
246
+ lib/
247
+ lib64/
248
+ parts/
249
+ sdist/
250
+ var/
251
+ wheels/
252
+ pip-wheel-metadata/
253
+ share/python-wheels/
254
+ *.egg-info/
255
+ .installed.cfg
256
+ *.egg
257
+ MANIFEST
258
+
259
+ # FastAPI / Uvicorn
260
+ uvicorn_error.log
261
+ uvicorn_access.log
262
+
263
+ # Docker
264
+ .dockerignore
265
+ .docker/
266
+
267
+ # Docker compose
268
+ .docker-compose.override.yml
269
+ .docker-compose.yml
270
+
271
+ # Local development settings
272
+ .env.local
273
+ .env.development.local
274
+ .env.test.local
275
+ .env.production.local
276
+
277
+ # Application logs
278
+ app.log
279
+ *.log
280
+
281
+ # Database migrations backup
282
+ alembic/versions/*.bak
283
+
284
+ # Coverage reports
285
+ .coverage.xml
286
+ .coveralls.yml
287
+ coverage/lcov-report/
288
+
289
+ # IDE
290
+ .vscode/settings.json
291
+ !.vscode/tasks.json
292
+ !.vscode/launch.json
293
+ !.vscode/extensions.json
294
+ .idea/*
295
+ !.idea/codeStyles/Project.xml
296
+ .vs/
297
+ *.sw*
298
+ *~
299
+ .project
300
+ .pydevproject
301
+ *.kate-swp
302
+
303
+ # Python REPL history
304
+ .python_history
305
+
306
+ # Coverage
307
+ .coverage*
backend/DOCKER_README.md ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AI-Powered Hiring Assessment Platform - Docker Setup
2
+
3
+ This guide explains how to build and run the AI-Powered Hiring Assessment Platform using Docker.
4
+
5
+ ## Prerequisites
6
+
7
+ - Docker installed on your machine
8
+ - Docker Compose installed (usually comes with Docker Desktop)
9
+
10
+ ## Building and Running the Application
11
+
12
+ ### 1. Clone the Repository
13
+
14
+ ```bash
15
+ git clone <repository-url>
16
+ cd backend
17
+ ```
18
+
19
+ ### 2. Build and Run with Docker Compose
20
+
21
+ The easiest way to run the application is using Docker Compose:
22
+
23
+ ```bash
24
+ docker-compose up --build
25
+ ```
26
+
27
+ This command will:
28
+ - Build the backend image
29
+ - Start the backend service
30
+ - Expose the application on port 8000
31
+
32
+ ### 3. Access the Application
33
+
34
+ Once the containers are running, you can access the application at:
35
+ - API Documentation: http://localhost:8000/docs
36
+ - Health Check: http://localhost:8000/health
37
+
38
+ ### 4. Alternative: Build and Run Individual Containers
39
+
40
+ If you prefer to build and run individual containers:
41
+
42
+ #### Build the Image
43
+ ```bash
44
+ docker build -t assessment-platform-backend .
45
+ ```
46
+
47
+ #### Run the Container
48
+ ```bash
49
+ docker run -p 8000:8000 assessment-platform-backend
50
+ ```
51
+
52
+ ## Environment Variables
53
+
54
+ The application uses the following environment variables (defined in docker-compose.yml):
55
+
56
+ - `DATABASE_URL`: Database connection string (defaults to SQLite)
57
+ - `HOST`: Host address (defaults to 0.0.0.0)
58
+ - `PORT`: Port number (defaults to 8000)
59
+ - `DEBUG`: Debug mode (defaults to True)
60
+ - `LOG_LEVEL`: Logging level (defaults to INFO)
61
+ - `LOG_FILE`: Log file path (defaults to app.log)
62
+ - `SECRET_KEY`: Secret key for JWT tokens
63
+ - `ALGORITHM`: Algorithm for JWT encoding
64
+ - `ACCESS_TOKEN_EXPIRE_MINUTES`: Token expiration time
65
+ - `APP_NAME`: Application name
66
+ - `APP_VERSION`: Application version
67
+ - `APP_DESCRIPTION`: Application description
68
+
69
+ ## Stopping the Application
70
+
71
+ To stop the application:
72
+
73
+ ```bash
74
+ # If running with docker-compose
75
+ Ctrl+C in the terminal where it's running
76
+
77
+ # Or in another terminal
78
+ docker-compose down
79
+ ```
80
+
81
+ ## Troubleshooting
82
+
83
+ 1. **Port Already in Use**: If port 8000 is already in use, change the port mapping in docker-compose.yml
84
+
85
+ 2. **Permission Issues**: Make sure you have the necessary permissions to run Docker commands
86
+
87
+ 3. **Build Errors**: Check that all dependencies in requirements.txt are compatible with the Python version
88
+
89
+ ## Development Notes
90
+
91
+ - The current setup uses SQLite as the database for simplicity
92
+ - For production deployments, consider using PostgreSQL or MySQL
93
+ - The volume mount in docker-compose.yml allows for live reloading during development
94
+ - Logs are stored in the ./logs directory on the host machine
backend/Dockerfile ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Use official Python runtime as the base image
2
+ FROM python:3.11-slim
3
+
4
+ # Set environment variables
5
+ ENV PYTHONDONTWRITEBYTECODE 1
6
+ ENV PYTHONUNBUFFERED 1
7
+
8
+ # Set the working directory in the container
9
+ WORKDIR /app
10
+
11
+ # Install system dependencies
12
+ RUN apt-get update \
13
+ && apt-get install -y --no-install-recommends \
14
+ build-essential \
15
+ gcc \
16
+ && rm -rf /var/lib/apt/lists/*
17
+
18
+ # Copy the requirements file first to leverage Docker cache
19
+ COPY requirements*.txt ./
20
+
21
+ # Install Python dependencies
22
+ RUN pip install --upgrade pip
23
+ RUN pip install -r requirements.txt
24
+
25
+ # Copy the rest of the application code
26
+ COPY . .
27
+
28
+ # Expose the port the app runs on
29
+ EXPOSE 8000
30
+
31
+ # Run the application
32
+ CMD ["python", "main.py"]
backend/QWEN.md ADDED
@@ -0,0 +1,240 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AI-Powered Hiring Assessment Platform Backend
2
+
3
+ ## Project Overview
4
+
5
+ This is a FastAPI-based backend application for an AI-powered hiring assessment platform. The system enables HR professionals to create and manage assessments for job candidates, while allowing candidates to take assessments and review their results.
6
+
7
+ The application follows a clean architecture with proper separation of concerns:
8
+ - **API Layer**: Handles HTTP requests and responses
9
+ - **Service Layer**: Contains business logic
10
+ - **Database Layer**: Manages database connections and sessions
11
+ - **Model Layer**: Defines database models using SQLAlchemy
12
+ - **Schema Layer**: Defines Pydantic schemas for request/response validation
13
+
14
+ ## Technologies Used
15
+
16
+ - **Python 3.11**
17
+ - **FastAPI**: Modern, fast web framework for building APIs
18
+ - **SQLAlchemy**: SQL toolkit and ORM for database operations
19
+ - **SQLite**: Lightweight database for development
20
+ - **Alembic**: Database migration tool
21
+ - **Pydantic**: Data validation and settings management
22
+ - **UUID**: For generating unique identifiers
23
+
24
+ ## Architecture Components
25
+
26
+ ### Directory Structure
27
+ ```
28
+ backend/
29
+ ├── api/ # API route definitions
30
+ │ ├── user_routes.py # User registration/login endpoints
31
+ │ ├── job_routes.py # Job-related endpoints
32
+ │ ├── assessment_routes.py # Assessment-related endpoints
33
+ │ ├── application_routes.py # Application-related endpoints
34
+ │ └── routes.py # Root and health check endpoints
35
+ ├── database/ # Database connection utilities
36
+ │ └── database.py # Database engine and session management
37
+ ├── models/ # SQLAlchemy models
38
+ │ ├── user.py # User model
39
+ │ ├── job.py # Job model
40
+ │ ├── assessment.py # Assessment model
41
+ │ ├── application.py # Application model
42
+ │ └── base.py # Base model class
43
+ ├── schemas/ # Pydantic schemas
44
+ │ ├── user.py # User schemas
45
+ │ ├── job.py # Job schemas
46
+ │ ├── assessment.py # Assessment schemas
47
+ │ ├── application.py # Application schemas
48
+ │ └── base.py # Base schema class
49
+ ├── services/ # Business logic layer
50
+ │ ├── user_service.py # User-related services
51
+ │ ├── job_service.py # Job-related services
52
+ │ ├── assessment_service.py # Assessment-related services
53
+ │ ├── application_service.py # Application-related services
54
+ │ └── base_service.py # Generic service functions
55
+ ├── alembic/ # Database migration files
56
+ ├── config.py # Application configuration
57
+ ├── logging_config.py # Logging configuration
58
+ ├── main.py # Application entry point
59
+ ├── .env # Environment variables
60
+ └── requirements.txt # Python dependencies
61
+ ```
62
+
63
+ ### Key Features
64
+
65
+ 1. **User Management**:
66
+ - Registration and authentication
67
+ - Role-based access (HR vs Applicant)
68
+
69
+ 2. **Job Management**:
70
+ - Create, update, delete job postings
71
+ - Manage job details and requirements
72
+
73
+ 3. **Assessment Management**:
74
+ - Create assessments linked to jobs
75
+ - Define questions and passing scores
76
+ - Regenerate assessments with new questions
77
+
78
+ 4. **Application Management**:
79
+ - Submit applications with answers
80
+ - Track application results and scores
81
+
82
+ ### API Endpoints
83
+
84
+ #### Registration
85
+ - `POST /registration/signup` - User registration
86
+ - `POST /registration/login` - User login
87
+ - `POST /registration/logout` - User logout
88
+
89
+ #### Users
90
+ - `GET /users/{id}` - Get user details
91
+
92
+ #### Jobs
93
+ - `GET /jobs` - List jobs
94
+ - `GET /jobs/{id}` - Get job details
95
+ - `POST /jobs` - Create job
96
+ - `PATCH /jobs/{id}` - Update job
97
+ - `DELETE /jobs/{id}` - Delete job
98
+
99
+ #### Assessments
100
+ - `GET /assessments/jobs/{jid}` - List assessments for a job
101
+ - `GET /assessments/jobs/{jid}/{aid}` - Get assessment details
102
+ - `POST /assessments/jobs/{id}` - Create assessment
103
+ - `PATCH /assessments/jobs/{jid}/{aid}/regenerate` - Regenerate assessment
104
+ - `PATCH /assessments/jobs/{jid}/{aid}` - Update assessment
105
+ - `DELETE /assessments/jobs/{jid}/{aid}` - Delete assessment
106
+
107
+ #### Applications
108
+ - `GET /applications/jobs/{jid}/assessments/{aid}` - List applications
109
+ - `POST /applications/jobs/{jid}/assessments/{aid}` - Create application
110
+
111
+ #### Health Check
112
+ - `GET /` - Root endpoint
113
+ - `GET /health` - Health check endpoint
114
+
115
+ ## Configuration
116
+
117
+ The application uses a `.env` file for configuration, managed through the `config.py` file:
118
+
119
+ ```env
120
+ # Database Configuration
121
+ DATABASE_URL=sqlite:///./assessment_platform.db
122
+
123
+ # Server Configuration
124
+ HOST=0.0.0.0
125
+ PORT=8000
126
+ DEBUG=False
127
+
128
+ # Logging Configuration
129
+ LOG_LEVEL=INFO
130
+ LOG_FILE=app.log
131
+ LOG_FORMAT=%(asctime)s - %(name)s - %(levelname)s - %(message)s
132
+
133
+ # JWT Configuration (for future use)
134
+ SECRET_KEY=your-secret-key-here
135
+ ALGORITHM=HS256
136
+ ACCESS_TOKEN_EXPIRE_MINUTES=30
137
+
138
+ # Application Configuration
139
+ APP_NAME=AI-Powered Hiring Assessment Platform
140
+ APP_VERSION=0.1.0
141
+ APP_DESCRIPTION=MVP for managing hiring assessments using AI
142
+ ```
143
+
144
+ ## Building and Running
145
+
146
+ ### Prerequisites
147
+ - Python 3.11+
148
+ - pip package manager
149
+
150
+ ### Setup Instructions
151
+
152
+ 1. **Install Dependencies**:
153
+ ```bash
154
+ pip install -r requirements.txt
155
+ ```
156
+
157
+ 2. **Set Up Environment Variables**:
158
+ Copy the `.env.example` file to `.env` and adjust the values as needed.
159
+
160
+ 3. **Run Database Migrations**:
161
+ ```bash
162
+ alembic upgrade head
163
+ ```
164
+
165
+ 4. **Start the Application**:
166
+ ```bash
167
+ python main.py
168
+ ```
169
+
170
+ Or using uvicorn directly:
171
+ ```bash
172
+ uvicorn main:app --host 0.0.0.0 --port 8000 --reload
173
+ ```
174
+
175
+ ### Development Mode
176
+ For development, you can run the application with hot-reloading enabled:
177
+ ```bash
178
+ uvicorn main:app --reload --host 0.0.0.0 --port 8000
179
+ ```
180
+
181
+ ## Testing
182
+
183
+ To run tests (when available):
184
+ ```bash
185
+ pytest
186
+ ```
187
+
188
+ ## Logging
189
+
190
+ The application implements comprehensive logging through the `logging_config.py` module:
191
+ - Logs are written to both file (`app.log`) and console
192
+ - Log level can be configured via the `LOG_LEVEL` environment variable
193
+ - Different log levels (DEBUG, INFO, WARNING, ERROR) are used appropriately
194
+ - All major operations are logged with contextual information
195
+
196
+ ## Database Migrations
197
+
198
+ The application uses Alembic for database migrations:
199
+ - To create a new migration: `alembic revision --autogenerate -m "Description"`
200
+ - To apply migrations: `alembic upgrade head`
201
+ - To check current migration status: `alembic current`
202
+
203
+ ## Development Conventions
204
+
205
+ 1. **Code Style**:
206
+ - Follow PEP 8 guidelines
207
+ - Use type hints for all function parameters and return values
208
+ - Write docstrings for all public functions and classes
209
+
210
+ 2. **Error Handling**:
211
+ - Use appropriate HTTP status codes
212
+ - Return meaningful error messages
213
+ - Log errors appropriately
214
+
215
+ 3. **Security**:
216
+ - Passwords should be hashed (currently using placeholder)
217
+ - Input validation through Pydantic schemas
218
+ - SQL injection prevention through SQLAlchemy ORM
219
+
220
+ 4. **Architecture**:
221
+ - Keep business logic in service layer
222
+ - Use dependency injection for database sessions
223
+ - Separate API routes by domain/model
224
+ - Maintain clear separation between layers
225
+
226
+ ## Future Enhancements
227
+
228
+ - JWT token-based authentication
229
+ - Password hashing implementation
230
+ - Advanced assessment features
231
+ - Admin dashboard endpoints
232
+ - More sophisticated logging and monitoring
233
+ - Unit and integration tests
234
+
235
+ # TODO:
236
+ - when creating an assessment we should pass the questions of the assessment.
237
+ - all API inputs and outputs should have a clear schema; even the enums should be clear and appear in the Swagger APIs (when visiting /docs)
238
+ - the validation of the inputs should be done by Pydantic at the schema level, not at the model level only!
239
+ - the answers are not a model themselves, so the services/answer functions should be aware of that.
240
+
backend/README.md ADDED
@@ -0,0 +1,41 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # AI-Powered Hiring Assessment Platform - Demo Credentials
2
+
3
+ ## HR Accounts
4
+
5
+ | Name | Email | Password |
6
+ |------|-------|----------|
7
+ | Sarah Johnson | sarah.johnson@demo.com | password123 |
8
+ | Michael Chen | michael.chen@demo.com | password123 |
9
+ | Emma Rodriguez | emma.rodriguez@demo.com | password123 |
10
+ | David Wilson | david.wilson@demo.com | password123 |
11
+
12
+ ## Candidate Accounts
13
+
14
+ | Name | Email | Password |
15
+ |------|-------|----------|
16
+ | Alex Thompson | alex.thompson@demo.com | password123 |
17
+ | Jessica Lee | jessica.lee@demo.com | password123 |
18
+ | Ryan Patel | ryan.patel@demo.com | password123 |
19
+ | Olivia Kim | olivia.kim@demo.com | password123 |
20
+
21
+ ## Sample Jobs & Assessments
22
+
23
+ The demo includes the following sample data:
24
+ - 4 job postings with varying seniority levels
25
+ - 4 assessments linked to these jobs
26
+ - Sample applications submitted by candidates
27
+
28
+ ## Getting Started
29
+
30
+ 1. Clone the repository
31
+ 2. Install dependencies: `pip install -r requirements.txt`
32
+ 3. Set up environment variables (copy `.env.example` to `.env`)
33
+ 4. Run database migrations: `alembic upgrade head`
34
+ 5. Start the application: `python main.py`
35
+ 6. Access the API documentation at `http://localhost:8000/docs`
36
+
37
+ ## API Usage
38
+
39
+ - HR users can create jobs and assessments
40
+ - Candidates can apply to jobs and take assessments
41
+ - All accounts are pre-populated with sample data for demonstration
backend/alembic.ini ADDED
@@ -0,0 +1,149 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # A generic, single database configuration.
2
+
3
+ [alembic]
4
+ # path to migration scripts.
5
+ # this is typically a path given in POSIX (e.g. forward slashes)
6
+ # format, relative to the token %(here)s which refers to the location of this
7
+ # ini file
8
+ script_location = %(here)s/alembic
9
+
10
+ # template used to generate migration file names; The default value is %%(rev)s_%%(slug)s
11
+ # Uncomment the line below if you want the files to be prepended with date and time
12
+ # see https://alembic.sqlalchemy.org/en/latest/tutorial.html#editing-the-ini-file
13
+ # for all available tokens
14
+ # file_template = %%(year)d_%%(month).2d_%%(day).2d_%%(hour).2d%%(minute).2d-%%(rev)s_%%(slug)s
15
+ # Or organize into date-based subdirectories (requires recursive_version_locations = true)
16
+ # file_template = %%(year)d/%%(month).2d/%%(day).2d_%%(hour).2d%%(minute).2d_%%(second).2d_%%(rev)s_%%(slug)s
17
+
18
+ # sys.path path, will be prepended to sys.path if present.
19
+ # defaults to the current working directory. for multiple paths, the path separator
20
+ # is defined by "path_separator" below.
21
+ prepend_sys_path = .
22
+
23
+
24
+ # timezone to use when rendering the date within the migration file
25
+ # as well as the filename.
26
+ # If specified, requires the tzdata library which can be installed by adding
27
+ # `alembic[tz]` to the pip requirements.
28
+ # string value is passed to ZoneInfo()
29
+ # leave blank for localtime
30
+ # timezone =
31
+
32
+ # max length of characters to apply to the "slug" field
33
+ # truncate_slug_length = 40
34
+
35
+ # set to 'true' to run the environment during
36
+ # the 'revision' command, regardless of autogenerate
37
+ # revision_environment = false
38
+
39
+ # set to 'true' to allow .pyc and .pyo files without
40
+ # a source .py file to be detected as revisions in the
41
+ # versions/ directory
42
+ # sourceless = false
43
+
44
+ # version location specification; This defaults
45
+ # to <script_location>/versions. When using multiple version
46
+ # directories, initial revisions must be specified with --version-path.
47
+ # The path separator used here should be the separator specified by "path_separator"
48
+ # below.
49
+ # version_locations = %(here)s/bar:%(here)s/bat:%(here)s/alembic/versions
50
+
51
+ # path_separator; This indicates what character is used to split lists of file
52
+ # paths, including version_locations and prepend_sys_path within configparser
53
+ # files such as alembic.ini.
54
+ # The default rendered in new alembic.ini files is "os", which uses os.pathsep
55
+ # to provide os-dependent path splitting.
56
+ #
57
+ # Note that in order to support legacy alembic.ini files, this default does NOT
58
+ # take place if path_separator is not present in alembic.ini. If this
59
+ # option is omitted entirely, fallback logic is as follows:
60
+ #
61
+ # 1. Parsing of the version_locations option falls back to using the legacy
62
+ # "version_path_separator" key, which if absent then falls back to the legacy
63
+ # behavior of splitting on spaces and/or commas.
64
+ # 2. Parsing of the prepend_sys_path option falls back to the legacy
65
+ # behavior of splitting on spaces, commas, or colons.
66
+ #
67
+ # Valid values for path_separator are:
68
+ #
69
+ # path_separator = :
70
+ # path_separator = ;
71
+ # path_separator = space
72
+ # path_separator = newline
73
+ #
74
+ # Use os.pathsep. Default configuration used for new projects.
75
+ path_separator = os
76
+
77
+ # set to 'true' to search source files recursively
78
+ # in each "version_locations" directory
79
+ # new in Alembic version 1.10
80
+ # recursive_version_locations = false
81
+
82
+ # the output encoding used when revision files
83
+ # are written from script.py.mako
84
+ # output_encoding = utf-8
85
+
86
+ # database URL. This is consumed by the user-maintained env.py script only.
87
+ # other means of configuring database URLs may be customized within the env.py
88
+ # file.
89
+ sqlalchemy.url = sqlite:///assessment_platform.db
90
+
91
+
92
+ [post_write_hooks]
93
+ # post_write_hooks defines scripts or Python functions that are run
94
+ # on newly generated revision scripts. See the documentation for further
95
+ # detail and examples
96
+
97
+ # format using "black" - use the console_scripts runner, against the "black" entrypoint
98
+ # hooks = black
99
+ # black.type = console_scripts
100
+ # black.entrypoint = black
101
+ # black.options = -l 79 REVISION_SCRIPT_FILENAME
102
+
103
+ # lint with attempts to fix using "ruff" - use the module runner, against the "ruff" module
104
+ # hooks = ruff
105
+ # ruff.type = module
106
+ # ruff.module = ruff
107
+ # ruff.options = check --fix REVISION_SCRIPT_FILENAME
108
+
109
+ # Alternatively, use the exec runner to execute a binary found on your PATH
110
+ # hooks = ruff
111
+ # ruff.type = exec
112
+ # ruff.executable = ruff
113
+ # ruff.options = check --fix REVISION_SCRIPT_FILENAME
114
+
115
+ # Logging configuration. This is also consumed by the user-maintained
116
+ # env.py script only.
117
+ [loggers]
118
+ keys = root,sqlalchemy,alembic
119
+
120
+ [handlers]
121
+ keys = console
122
+
123
+ [formatters]
124
+ keys = generic
125
+
126
+ [logger_root]
127
+ level = WARNING
128
+ handlers = console
129
+ qualname =
130
+
131
+ [logger_sqlalchemy]
132
+ level = WARNING
133
+ handlers =
134
+ qualname = sqlalchemy.engine
135
+
136
+ [logger_alembic]
137
+ level = INFO
138
+ handlers =
139
+ qualname = alembic
140
+
141
+ [handler_console]
142
+ class = StreamHandler
143
+ args = (sys.stderr,)
144
+ level = NOTSET
145
+ formatter = generic
146
+
147
+ [formatter_generic]
148
+ format = %(levelname)-5.5s [%(name)s] %(message)s
149
+ datefmt = %H:%M:%S
backend/alembic/README ADDED
@@ -0,0 +1 @@
 
 
1
+ Generic single-database configuration.
backend/alembic/env.py ADDED
@@ -0,0 +1,83 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from logging.config import fileConfig
2
+
3
+ from sqlalchemy import engine_from_config
4
+ from sqlalchemy import pool
5
+
6
+ from alembic import context
7
+
8
+ # Import our models
9
+ import sys
10
+ import os
11
+ sys.path.append(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
12
+
13
+ from models import Base # Import the Base from our models __init__ file
14
+
15
+ # this is the Alembic Config object, which provides
16
+ # access to the values within the .ini file in use.
17
+ config = context.config
18
+
19
+ # Interpret the config file for Python logging.
20
+ # This line sets up loggers basically.
21
+ if config.config_file_name is not None:
22
+ fileConfig(config.config_file_name)
23
+
24
+ # add your model's MetaData object here
25
+ # for 'autogenerate' support
26
+ target_metadata = Base.metadata
27
+
28
+ # other values from the config, defined by the needs of env.py,
29
+ # can be acquired:
30
+ # my_important_option = config.get_main_option("my_important_option")
31
+ # ... etc.
32
+
33
+
34
+ def run_migrations_offline() -> None:
35
+ """Run migrations in 'offline' mode.
36
+
37
+ This configures the context with just a URL
38
+ and not an Engine, though an Engine is acceptable
39
+ here as well. By skipping the Engine creation
40
+ we don't even need a DBAPI to be available.
41
+
42
+ Calls to context.execute() here emit the given string to the
43
+ script output.
44
+
45
+ """
46
+ url = config.get_main_option("sqlalchemy.url")
47
+ context.configure(
48
+ url=url,
49
+ target_metadata=target_metadata,
50
+ literal_binds=True,
51
+ dialect_opts={"paramstyle": "named"},
52
+ )
53
+
54
+ with context.begin_transaction():
55
+ context.run_migrations()
56
+
57
+
58
+ def run_migrations_online() -> None:
59
+ """Run migrations in 'online' mode.
60
+
61
+ In this scenario we need to create an Engine
62
+ and associate a connection with the context.
63
+
64
+ """
65
+ connectable = engine_from_config(
66
+ config.get_section(config.config_ini_section, {}),
67
+ prefix="sqlalchemy.",
68
+ poolclass=pool.NullPool,
69
+ )
70
+
71
+ with connectable.connect() as connection:
72
+ context.configure(
73
+ connection=connection, target_metadata=target_metadata
74
+ )
75
+
76
+ with context.begin_transaction():
77
+ context.run_migrations()
78
+
79
+
80
+ if context.is_offline_mode():
81
+ run_migrations_offline()
82
+ else:
83
+ run_migrations_online()
backend/alembic/script.py.mako ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
## Mako template used by Alembic to render new revision files
## ("alembic revision ..."). Lines starting with '##' are Mako comments
## and are NOT written into the generated migration.
"""${message}

Revision ID: ${up_revision}
Revises: ${down_revision | comma,n}
Create Date: ${create_date}

"""
from typing import Sequence, Union

from alembic import op
import sqlalchemy as sa
${imports if imports else ""}

# revision identifiers, used by Alembic.
revision: str = ${repr(up_revision)}
down_revision: Union[str, Sequence[str], None] = ${repr(down_revision)}
branch_labels: Union[str, Sequence[str], None] = ${repr(branch_labels)}
depends_on: Union[str, Sequence[str], None] = ${repr(depends_on)}


def upgrade() -> None:
    """Upgrade schema."""
    ${upgrades if upgrades else "pass"}


def downgrade() -> None:
    """Downgrade schema."""
    ${downgrades if downgrades else "pass"}
backend/alembic/versions/1172a2fbb171_initial_migration_for_assessment_.py ADDED
@@ -0,0 +1,117 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Initial migration for assessment platform
2
+
3
+ Revision ID: 1172a2fbb171
4
+ Revises:
5
+ Create Date: 2026-02-02 23:15:34.140221
6
+
7
+ """
8
+ from typing import Sequence, Union
9
+
10
+ from alembic import op
11
+ import sqlalchemy as sa
12
+
13
+
14
# revision identifiers, used by Alembic.
revision: str = '1172a2fbb171'
down_revision: Union[str, Sequence[str], None] = None  # first revision in the chain
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
19
+
20
+
21
def upgrade() -> None:
    """Upgrade schema.

    Creates the initial assessment-platform tables in foreign-key
    dependency order: users, assessments, candidates_assessments,
    questions, answers, question_tags.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    # Accounts for both HR staff and candidates; discriminated by user_type.
    op.create_table('users',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('email', sa.String(), nullable=False),
        sa.Column('password_hash', sa.String(), nullable=False),
        sa.Column('user_type', sa.String(), nullable=False),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.CheckConstraint("user_type IN ('hr', 'candidate')", name='valid_user_type'),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_users_email'), 'users', ['email'], unique=True)
    op.create_index(op.f('ix_users_id'), 'users', ['id'], unique=False)
    # An assessment authored by an HR user (hr_id) describing a job opening.
    op.create_table('assessments',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('job_title', sa.String(), nullable=False),
        sa.Column('experience_level', sa.String(), nullable=True),
        sa.Column('required_skills', sa.Text(), nullable=True),
        sa.Column('job_description', sa.Text(), nullable=True),
        sa.Column('num_questions', sa.Integer(), nullable=True),
        sa.Column('assessment_description', sa.Text(), nullable=True),
        sa.Column('hr_id', sa.Integer(), nullable=True),
        sa.Column('is_active', sa.Boolean(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['hr_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_assessments_id'), 'assessments', ['id'], unique=False)
    # Join table tracking a candidate's attempt at an assessment
    # (timestamps, total score, status).
    op.create_table('candidates_assessments',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('candidate_id', sa.Integer(), nullable=True),
        sa.Column('assessment_id', sa.Integer(), nullable=True),
        sa.Column('started_at', sa.DateTime(), nullable=True),
        sa.Column('completed_at', sa.DateTime(), nullable=True),
        sa.Column('total_score', sa.Integer(), nullable=True),
        sa.Column('status', sa.String(), nullable=True),
        sa.ForeignKeyConstraint(['assessment_id'], ['assessments.id'], ),
        sa.ForeignKeyConstraint(['candidate_id'], ['users.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_candidates_assessments_id'), 'candidates_assessments', ['id'], unique=False)
    # Questions belong to an assessment.
    # is_knockout: presumably an automatic-fail question — confirm with app logic.
    op.create_table('questions',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('assessment_id', sa.Integer(), nullable=True),
        sa.Column('question_text', sa.Text(), nullable=False),
        sa.Column('question_type', sa.String(), nullable=False),
        sa.Column('is_knockout', sa.Boolean(), nullable=True),
        sa.Column('weight', sa.Integer(), nullable=True),
        sa.Column('max_score', sa.Integer(), nullable=True),
        sa.Column('created_at', sa.DateTime(), nullable=True),
        sa.CheckConstraint("question_type IN ('text', 'true_false', 'multiple_choice')", name='valid_question_type'),
        sa.ForeignKeyConstraint(['assessment_id'], ['assessments.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_questions_id'), 'questions', ['id'], unique=False)
    # A candidate's answer to one question within one attempt, with the
    # AI evaluation text and awarded score.
    op.create_table('answers',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('candidate_assessment_id', sa.Integer(), nullable=True),
        sa.Column('question_id', sa.Integer(), nullable=True),
        sa.Column('answer_text', sa.Text(), nullable=True),
        sa.Column('score_awarded', sa.Integer(), nullable=True),
        sa.Column('ai_evaluation', sa.Text(), nullable=True),
        sa.Column('evaluated_at', sa.DateTime(), nullable=True),
        sa.ForeignKeyConstraint(['candidate_assessment_id'], ['candidates_assessments.id'], ),
        sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_answers_id'), 'answers', ['id'], unique=False)
    # Free-form tags attached to questions.
    op.create_table('question_tags',
        sa.Column('id', sa.Integer(), nullable=False),
        sa.Column('question_id', sa.Integer(), nullable=True),
        sa.Column('tag_name', sa.String(), nullable=False),
        sa.ForeignKeyConstraint(['question_id'], ['questions.id'], ),
        sa.PrimaryKeyConstraint('id')
    )
    op.create_index(op.f('ix_question_tags_id'), 'question_tags', ['id'], unique=False)
    # ### end Alembic commands ###
99
+
100
+
101
def downgrade() -> None:
    """Downgrade schema.

    Drops the tables created by upgrade() in reverse foreign-key
    dependency order so no FK constraint is violated.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.drop_index(op.f('ix_question_tags_id'), table_name='question_tags')
    op.drop_table('question_tags')
    op.drop_index(op.f('ix_answers_id'), table_name='answers')
    op.drop_table('answers')
    op.drop_index(op.f('ix_questions_id'), table_name='questions')
    op.drop_table('questions')
    op.drop_index(op.f('ix_candidates_assessments_id'), table_name='candidates_assessments')
    op.drop_table('candidates_assessments')
    op.drop_index(op.f('ix_assessments_id'), table_name='assessments')
    op.drop_table('assessments')
    op.drop_index(op.f('ix_users_id'), table_name='users')
    op.drop_index(op.f('ix_users_email'), table_name='users')
    op.drop_table('users')
    # ### end Alembic commands ###
backend/alembic/versions/91905f51740d_final_structure_for_technical_.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Final structure for technical requirements
2
+
3
+ Revision ID: 91905f51740d
4
+ Revises: 1172a2fbb171
5
+ Create Date: 2026-02-03 00:46:41.529962
6
+
7
+ """
8
+ from typing import Sequence, Union
9
+
10
+ from alembic import op
11
+ import sqlalchemy as sa
12
+
13
+
14
# revision identifiers, used by Alembic.
revision: str = '91905f51740d'
down_revision: Union[str, Sequence[str], None] = '1172a2fbb171'  # follows the initial migration
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
19
+
20
+
21
def upgrade() -> None:
    """Upgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # No operations: autogenerate found no schema changes for this revision.
    # NOTE(review): the message says "Final structure for technical
    # requirements", yet the body is empty — verify the intended schema
    # changes weren't lost (the later seed migration assumes columns/tables
    # that no visible revision creates).
    pass
    # ### end Alembic commands ###
26
+
27
+
28
def downgrade() -> None:
    """Downgrade schema."""
    # ### commands auto generated by Alembic - please adjust! ###
    # No-op: mirrors the empty upgrade() above.
    pass
    # ### end Alembic commands ###
backend/alembic/versions/9facd9b60600_seed_demo_data_with_hr_and_candidate_.py ADDED
@@ -0,0 +1,396 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """Seed demo data with HR and candidate accounts
2
+
3
+ Revision ID: 9facd9b60600
4
+ Revises: 91905f51740d
5
+ Create Date: 2026-02-04 15:47:05.330740
6
+
7
+ """
8
+ from typing import Sequence, Union
9
+ import uuid
10
+ from alembic import op
11
+ import sqlalchemy as sa
12
+ from sqlalchemy.sql import table, column
13
+ from sqlalchemy import String, Integer, Boolean, Text
14
+ import json
15
+ from utils.password_utils import get_password_hash
16
+
17
+
18
# revision identifiers, used by Alembic.
revision: str = '9facd9b60600'
down_revision: Union[str, Sequence[str], None] = '91905f51740d'  # follows the (empty) restructure revision
branch_labels: Union[str, Sequence[str], None] = None
depends_on: Union[str, Sequence[str], None] = None
23
+
24
+
25
+ def upgrade() -> None:
26
+ """Upgrade schema with demo data."""
27
+ # Create table objects for insertion
28
+ users_table = table('users',
29
+ column('id', String),
30
+ column('first_name', String),
31
+ column('last_name', String),
32
+ column('email', String),
33
+ column('password', String),
34
+ column('role', String)
35
+ )
36
+
37
+ jobs_table = table('jobs',
38
+ column('id', String),
39
+ column('title', String),
40
+ column('seniority', String),
41
+ column('description', Text),
42
+ column('skill_categories', String),
43
+ column('active', Boolean)
44
+ )
45
+
46
+ assessments_table = table('assessments',
47
+ column('id', String),
48
+ column('job_id', String),
49
+ column('title', String),
50
+ column('duration', Integer),
51
+ column('passing_score', Integer),
52
+ column('questions', Text),
53
+ column('active', Boolean)
54
+ )
55
+
56
+ # Hash the password for all users
57
+ password_hash = get_password_hash("password123")
58
+
59
+ # Insert HR users
60
+ hr_users = [
61
+ {
62
+ 'id': str(uuid.uuid4()),
63
+ 'first_name': 'Sarah',
64
+ 'last_name': 'Johnson',
65
+ 'email': 'sarah.johnson@demo.com',
66
+ 'password': password_hash,
67
+ 'role': 'hr'
68
+ },
69
+ {
70
+ 'id': str(uuid.uuid4()),
71
+ 'first_name': 'Michael',
72
+ 'last_name': 'Chen',
73
+ 'email': 'michael.chen@demo.com',
74
+ 'password': password_hash,
75
+ 'role': 'hr'
76
+ },
77
+ {
78
+ 'id': str(uuid.uuid4()),
79
+ 'first_name': 'Emma',
80
+ 'last_name': 'Rodriguez',
81
+ 'email': 'emma.rodriguez@demo.com',
82
+ 'password': password_hash,
83
+ 'role': 'hr'
84
+ },
85
+ {
86
+ 'id': str(uuid.uuid4()),
87
+ 'first_name': 'David',
88
+ 'last_name': 'Wilson',
89
+ 'email': 'david.wilson@demo.com',
90
+ 'password': password_hash,
91
+ 'role': 'hr'
92
+ }
93
+ ]
94
+
95
+ # Insert candidate users
96
+ candidate_users = [
97
+ {
98
+ 'id': str(uuid.uuid4()),
99
+ 'first_name': 'Alex',
100
+ 'last_name': 'Thompson',
101
+ 'email': 'alex.thompson@demo.com',
102
+ 'password': password_hash,
103
+ 'role': 'applicant'
104
+ },
105
+ {
106
+ 'id': str(uuid.uuid4()),
107
+ 'first_name': 'Jessica',
108
+ 'last_name': 'Lee',
109
+ 'email': 'jessica.lee@demo.com',
110
+ 'password': password_hash,
111
+ 'role': 'applicant'
112
+ },
113
+ {
114
+ 'id': str(uuid.uuid4()),
115
+ 'first_name': 'Ryan',
116
+ 'last_name': 'Patel',
117
+ 'email': 'ryan.patel@demo.com',
118
+ 'password': password_hash,
119
+ 'role': 'applicant'
120
+ },
121
+ {
122
+ 'id': str(uuid.uuid4()),
123
+ 'first_name': 'Olivia',
124
+ 'last_name': 'Kim',
125
+ 'email': 'olivia.kim@demo.com',
126
+ 'password': password_hash,
127
+ 'role': 'applicant'
128
+ }
129
+ ]
130
+
131
+ # Combine all users
132
+ all_users = hr_users + candidate_users
133
+
134
+ # Insert users
135
+ op.bulk_insert(users_table, all_users)
136
+
137
+ # Insert sample jobs
138
+ jobs = [
139
+ {
140
+ 'id': str(uuid.uuid4()),
141
+ 'title': 'Senior Python Developer',
142
+ 'seniority': 'senior',
143
+ 'description': 'We are looking for an experienced Python developer to join our team. The ideal candidate should have experience with web frameworks, databases, and cloud technologies.',
144
+ 'skill_categories': json.dumps(['python', 'django', 'flask', 'sql', 'cloud']),
145
+ 'active': True
146
+ },
147
+ {
148
+ 'id': str(uuid.uuid4()),
149
+ 'title': 'Junior Data Analyst',
150
+ 'seniority': 'junior',
151
+ 'description': 'We are looking for a Junior Data Analyst to join our analytics team. The ideal candidate should have experience with data visualization, statistical analysis, and SQL queries.',
152
+ 'skill_categories': json.dumps(['sql', 'python', 'excel', 'tableau', 'statistics']),
153
+ 'active': True
154
+ },
155
+ {
156
+ 'id': str(uuid.uuid4()),
157
+ 'title': 'Mid-Level Software Engineer',
158
+ 'seniority': 'mid',
159
+ 'description': 'We are looking for a Mid-Level Software Engineer with experience in Python, Django, and REST APIs.',
160
+ 'skill_categories': json.dumps(['python', 'django', 'rest-api', 'sql', 'testing']),
161
+ 'active': True
162
+ },
163
+ {
164
+ 'id': str(uuid.uuid4()),
165
+ 'title': 'DevOps Engineer',
166
+ 'seniority': 'mid',
167
+ 'description': 'We are looking for a DevOps Engineer to help us improve our CI/CD pipelines and infrastructure automation.',
168
+ 'skill_categories': json.dumps(['docker', 'kubernetes', 'aws', 'jenkins', 'terraform']),
169
+ 'active': True
170
+ }
171
+ ]
172
+
173
+ # Insert jobs
174
+ op.bulk_insert(jobs_table, jobs)
175
+
176
+ # Create a mapping of job titles to IDs for assessment creation
177
+ job_mapping = {job['title']: job['id'] for job in jobs}
178
+
179
+ # Create sample assessments with questions
180
+ assessments = []
181
+
182
+ # Python Developer Assessment
183
+ python_questions = [
184
+ {
185
+ "id": str(uuid.uuid4()),
186
+ "text": "What is the difference between a list and a tuple in Python?",
187
+ "weight": 3,
188
+ "skill_categories": ["python"],
189
+ "type": "text_based",
190
+ "options": [],
191
+ "correct_options": []
192
+ },
193
+ {
194
+ "id": str(uuid.uuid4()),
195
+ "text": "Which of the following is a mutable data type in Python?",
196
+ "weight": 2,
197
+ "skill_categories": ["python"],
198
+ "type": "choose_one",
199
+ "options": [
200
+ {"text": "Tuple", "value": "a"},
201
+ {"text": "String", "value": "b"},
202
+ {"text": "List", "value": "c"},
203
+ {"text": "Integer", "value": "d"}
204
+ ],
205
+ "correct_options": ["c"]
206
+ },
207
+ {
208
+ "id": str(uuid.uuid4()),
209
+ "text": "Which of the following are Python web frameworks?",
210
+ "weight": 3,
211
+ "skill_categories": ["python", "web-development"],
212
+ "type": "choose_many",
213
+ "options": [
214
+ {"text": "Django", "value": "a"},
215
+ {"text": "Express", "value": "b"},
216
+ {"text": "Flask", "value": "c"},
217
+ {"text": "Spring", "value": "d"}
218
+ ],
219
+ "correct_options": ["a", "c"]
220
+ }
221
+ ]
222
+
223
+ assessments.append({
224
+ 'id': str(uuid.uuid4()),
225
+ 'job_id': job_mapping['Senior Python Developer'],
226
+ 'title': 'Python Programming Skills Assessment',
227
+ 'duration': 1800, # 30 minutes
228
+ 'passing_score': 70,
229
+ 'questions': json.dumps(python_questions),
230
+ 'active': True
231
+ })
232
+
233
+ # Data Analyst Assessment
234
+ data_analyst_questions = [
235
+ {
236
+ "id": str(uuid.uuid4()),
237
+ "text": "What is the purpose of GROUP BY clause in SQL?",
238
+ "weight": 3,
239
+ "skill_categories": ["sql"],
240
+ "type": "text_based",
241
+ "options": [],
242
+ "correct_options": []
243
+ },
244
+ {
245
+ "id": str(uuid.uuid4()),
246
+ "text": "Which of the following are data visualization tools?",
247
+ "weight": 2,
248
+ "skill_categories": ["data-visualization"],
249
+ "type": "choose_many",
250
+ "options": [
251
+ {"text": "Tableau", "value": "a"},
252
+ {"text": "Power BI", "value": "b"},
253
+ {"text": "Excel", "value": "c"},
254
+ {"text": "Notepad", "value": "d"}
255
+ ],
256
+ "correct_options": ["a", "b", "c"]
257
+ },
258
+ {
259
+ "id": str(uuid.uuid4()),
260
+ "text": "What does the acronym ETL stand for?",
261
+ "weight": 2,
262
+ "skill_categories": ["data-processing"],
263
+ "type": "choose_one",
264
+ "options": [
265
+ {"text": "Extract, Transform, Load", "value": "a"},
266
+ {"text": "Edit, Transfer, Link", "value": "b"},
267
+ {"text": "Encode, Transmit, Log", "value": "c"},
268
+ {"text": "Estimate, Test, Learn", "value": "d"}
269
+ ],
270
+ "correct_options": ["a"]
271
+ }
272
+ ]
273
+
274
+ assessments.append({
275
+ 'id': str(uuid.uuid4()),
276
+ 'job_id': job_mapping['Junior Data Analyst'],
277
+ 'title': 'Data Analysis Skills Assessment',
278
+ 'duration': 2400, # 40 minutes
279
+ 'passing_score': 65,
280
+ 'questions': json.dumps(data_analyst_questions),
281
+ 'active': True
282
+ })
283
+
284
+ # Software Engineer Assessment
285
+ software_eng_questions = [
286
+ {
287
+ "id": str(uuid.uuid4()),
288
+ "text": "Explain the difference between REST and GraphQL APIs.",
289
+ "weight": 4,
290
+ "skill_categories": ["api-design"],
291
+ "type": "text_based",
292
+ "options": [],
293
+ "correct_options": []
294
+ },
295
+ {
296
+ "id": str(uuid.uuid4()),
297
+ "text": "Which HTTP status code indicates a successful request?",
298
+ "weight": 1,
299
+ "skill_categories": ["web-development"],
300
+ "type": "choose_one",
301
+ "options": [
302
+ {"text": "200", "value": "a"},
303
+ {"text": "404", "value": "b"},
304
+ {"text": "500", "value": "c"},
305
+ {"text": "301", "value": "d"}
306
+ ],
307
+ "correct_options": ["a"]
308
+ },
309
+ {
310
+ "id": str(uuid.uuid4()),
311
+ "text": "Which of the following are version control systems?",
312
+ "weight": 2,
313
+ "skill_categories": ["development-tools"],
314
+ "type": "choose_many",
315
+ "options": [
316
+ {"text": "Git", "value": "a"},
317
+ {"text": "SVN", "value": "b"},
318
+ {"text": "Mercurial", "value": "c"},
319
+ {"text": "Docker", "value": "d"}
320
+ ],
321
+ "correct_options": ["a", "b", "c"]
322
+ }
323
+ ]
324
+
325
+ assessments.append({
326
+ 'id': str(uuid.uuid4()),
327
+ 'job_id': job_mapping['Mid-Level Software Engineer'],
328
+ 'title': 'Software Engineering Fundamentals Assessment',
329
+ 'duration': 1800, # 30 minutes
330
+ 'passing_score': 75,
331
+ 'questions': json.dumps(software_eng_questions),
332
+ 'active': True
333
+ })
334
+
335
+ # DevOps Assessment
336
+ devops_questions = [
337
+ {
338
+ "id": str(uuid.uuid4()),
339
+ "text": "What is the main purpose of Docker containers?",
340
+ "weight": 3,
341
+ "skill_categories": ["containerization"],
342
+ "type": "text_based",
343
+ "options": [],
344
+ "correct_options": []
345
+ },
346
+ {
347
+ "id": str(uuid.uuid4()),
348
+ "text": "Which of the following are container orchestration platforms?",
349
+ "weight": 3,
350
+ "skill_categories": ["orchestration"],
351
+ "type": "choose_many",
352
+ "options": [
353
+ {"text": "Kubernetes", "value": "a"},
354
+ {"text": "Docker Swarm", "value": "b"},
355
+ {"text": "Apache Mesos", "value": "c"},
356
+ {"text": "Jenkins", "value": "d"}
357
+ ],
358
+ "correct_options": ["a", "b", "c"]
359
+ },
360
+ {
361
+ "id": str(uuid.uuid4()),
362
+ "text": "What does CI/CD stand for?",
363
+ "weight": 1,
364
+ "skill_categories": ["development-process"],
365
+ "type": "choose_one",
366
+ "options": [
367
+ {"text": "Continuous Integration/Continuous Deployment", "value": "a"},
368
+ {"text": "Computer Integrated Design", "value": "b"},
369
+ {"text": "Customer Identity and Data", "value": "c"},
370
+ {"text": "Cloud Infrastructure Development", "value": "d"}
371
+ ],
372
+ "correct_options": ["a"]
373
+ }
374
+ ]
375
+
376
+ assessments.append({
377
+ 'id': str(uuid.uuid4()),
378
+ 'job_id': job_mapping['DevOps Engineer'],
379
+ 'title': 'DevOps Practices Assessment',
380
+ 'duration': 2100, # 35 minutes
381
+ 'passing_score': 70,
382
+ 'questions': json.dumps(devops_questions),
383
+ 'active': True
384
+ })
385
+
386
+ # Insert assessments
387
+ op.bulk_insert(assessments_table, assessments)
388
+
389
+
390
# Emails and job titles inserted by upgrade(); used to delete exactly the
# seeded rows on downgrade instead of truncating whole tables.
_DEMO_EMAILS = (
    'sarah.johnson@demo.com', 'michael.chen@demo.com',
    'emma.rodriguez@demo.com', 'david.wilson@demo.com',
    'alex.thompson@demo.com', 'jessica.lee@demo.com',
    'ryan.patel@demo.com', 'olivia.kim@demo.com',
)
_DEMO_JOB_TITLES = (
    'Senior Python Developer', 'Junior Data Analyst',
    'Mid-Level Software Engineer', 'DevOps Engineer',
)


def downgrade() -> None:
    """Downgrade schema - remove demo data.

    BUGFIX: the original issued `DELETE FROM users` (and jobs, assessments,
    applications) with no WHERE clause, wiping ALL rows — including real
    data created after this migration ran. Now only the rows seeded by
    upgrade() are removed, matched by the known demo e-mail addresses and
    job titles. Children are deleted first to respect FK constraints.
    """
    titles = ", ".join(f"'{t}'" for t in _DEMO_JOB_TITLES)
    emails = ", ".join(f"'{e}'" for e in _DEMO_EMAILS)
    # assumes applications.assessment_id links to assessments — matches the
    # job/assessment application routes; TODO confirm against the model.
    op.execute(
        "DELETE FROM applications WHERE assessment_id IN ("
        "SELECT id FROM assessments WHERE job_id IN ("
        f"SELECT id FROM jobs WHERE title IN ({titles})))"
    )
    op.execute(
        "DELETE FROM assessments WHERE job_id IN ("
        f"SELECT id FROM jobs WHERE title IN ({titles}))"
    )
    op.execute(f"DELETE FROM jobs WHERE title IN ({titles})")
    op.execute(f"DELETE FROM users WHERE email IN ({emails})")
backend/api/__init__.py ADDED
File without changes
backend/api/application_routes.py ADDED
@@ -0,0 +1,88 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+ from sqlalchemy.orm import Session
3
+ from typing import List
4
+ import json
5
+
6
+ from database.database import get_db
7
+ from schemas import ApplicationCreate, ApplicationUpdate, ApplicationResponse, ApplicationListResponse, ApplicationDetailedResponse, ApplicationDetailedListResponse
8
+ from services import create_application, get_application, get_applications_by_job_and_assessment, calculate_application_score
9
+ from services.assessment_service import get_assessment
10
+ from utils.dependencies import get_current_user
11
+ from models.user import User
12
+ from logging_config import get_logger
13
+
14
+ # Create logger for this module
15
+ logger = get_logger(__name__)
16
+
17
+ router = APIRouter(prefix="/applications", tags=["applications"])
18
+
19
@router.get("/jobs/{jid}/assessments/{aid}", response_model=ApplicationDetailedListResponse)
def get_applications_list(jid: str, aid: str, page: int = 1, limit: int = 10, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Get list of applications for an assessment (HR only).

    Returns one page of applications for the given job/assessment pair,
    with each application's stored JSON answers decoded and its score
    computed.
    """
    logger.info(f"Retrieving applications list for job ID: {jid}, assessment ID: {aid}, page: {page}, limit: {limit} by user: {current_user.id}")
    # Only HR users can view applications
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to view applications by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can view applications"
        )
    skip = (page - 1) * limit
    applications = get_applications_by_job_and_assessment(db, jid, aid, skip=skip, limit=limit)

    # Calculate total count
    # NOTE(review): fetching up to 1000 rows just to count them silently caps
    # the reported total at 1000 — replace with a COUNT query in the service.
    total = len(get_applications_by_job_and_assessment(db, jid, aid, skip=0, limit=1000))  # Simplified for demo

    # Convert answers from JSON string to list and calculate scores
    application_responses = []
    for application in applications:
        # BUGFIX: copy the public attributes instead of mutating the ORM
        # instance's __dict__ in place. The original overwrote the tracked
        # `answers` attribute with a Python list (dirtying the session, which
        # could flush it back to the DB) and forwarded SQLAlchemy's
        # _sa_instance_state into the response model kwargs.
        application_dict = {
            key: value
            for key, value in vars(application).items()
            if not key.startswith('_')
        }
        application_dict['answers'] = json.loads(application.answers) if application.answers else []

        # Calculate score (placeholder)
        application_dict['score'] = calculate_application_score(db, application.id)
        application_dict['passing_score'] = 0.0  # Placeholder

        application_responses.append(ApplicationResponse(**application_dict))

    logger.info(f"Successfully retrieved {len(applications)} applications out of total {total} for job ID: {jid}, assessment ID: {aid}")
    return ApplicationDetailedListResponse(
        count=len(applications),
        total=total,
        data=application_responses
    )
57
+
58
@router.post("/jobs/{jid}/assessments/{aid}", response_model=dict)  # Returns just id as per requirements
def create_new_application(jid: str, aid: str, application: ApplicationCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Create a new application for an assessment.

    Applicants may only submit applications on their own behalf, and the
    assessment must exist and belong to the given job. Returns the new
    application's id.
    """
    logger.info(f"Creating new application for job ID: {jid}, assessment ID: {aid}, user ID: {application.user_id} by user: {current_user.id}")

    # Guard: submitting applications is an applicant-only action.
    if current_user.role != "applicant":
        logger.warning(f"Unauthorized attempt to create application by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only applicant users can submit applications",
        )

    # Guard: an applicant cannot apply on behalf of another user.
    if current_user.id != application.user_id:
        logger.warning(f"User {current_user.id} attempted to submit application for user {application.user_id}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Cannot submit application for another user",
        )

    # Guard: the assessment must exist and belong to this job.
    assessment = get_assessment(db, aid)
    if not assessment or assessment.job_id != jid:
        logger.warning(f"Assessment not found for job ID: {jid}, assessment ID: {aid}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assessment not found for this job",
        )

    created = create_application(db, application)
    logger.info(f"Successfully created application with ID: {created.id} for job ID: {jid}, assessment ID: {aid}")
    return {"id": created.id}
backend/api/assessment_routes.py ADDED
@@ -0,0 +1,148 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+ from sqlalchemy.orm import Session
3
+ from typing import List
4
+ import json
5
+
6
+ from database.database import get_db
7
+ from schemas import AssessmentCreate, AssessmentUpdate, AssessmentRegenerate, AssessmentResponse, AssessmentListResponse, AssessmentDetailedResponse
8
+ from services import create_assessment, get_assessment, get_assessments_by_job, update_assessment, regenerate_assessment, delete_assessment
9
+ from utils.dependencies import get_current_user
10
+ from models.user import User
11
+ from logging_config import get_logger
12
+
13
+ # Create logger for this module
14
+ logger = get_logger(__name__)
15
+
16
+ router = APIRouter(prefix="/assessments", tags=["assessments"])
17
+
18
@router.get("/jobs/{jid}", response_model=AssessmentListResponse)
def get_assessments_list(jid: str, page: int = 1, limit: int = 10, db: Session = Depends(get_db)):
    """Get list of assessments for a job.

    Returns one page of assessments with their stored JSON questions
    decoded and a questions_count added per assessment.
    """
    logger.info(f"Retrieving assessments list for job ID: {jid}, page: {page}, limit: {limit}")
    skip = (page - 1) * limit
    assessments = get_assessments_by_job(db, jid, skip=skip, limit=limit)

    # Calculate total count
    # NOTE(review): fetching up to 1000 rows just to count them silently caps
    # the reported total at 1000 — replace with a COUNT query in the service.
    total = len(get_assessments_by_job(db, jid, skip=0, limit=1000))  # Simplified for demo

    # Convert questions from JSON string to list and add questions_count
    assessment_responses = []
    for assessment in assessments:
        # BUGFIX: copy the public attributes instead of mutating the ORM
        # instance's __dict__ in place. The original overwrote the tracked
        # `questions` attribute with a Python list (dirtying the session) and
        # forwarded SQLAlchemy's _sa_instance_state to the response model.
        assessment_dict = {
            key: value
            for key, value in vars(assessment).items()
            if not key.startswith('_')
        }
        assessment_dict['questions'] = json.loads(assessment.questions) if assessment.questions else []

        # Add questions count
        assessment_dict['questions_count'] = len(assessment_dict['questions'])
        assessment_responses.append(AssessmentResponse(**assessment_dict))

    logger.info(f"Successfully retrieved {len(assessments)} assessments out of total {total} for job ID: {jid}")
    return AssessmentListResponse(
        count=len(assessments),
        total=total,
        data=assessment_responses
    )
47
+
48
@router.get("/jobs/{jid}/{aid}", response_model=AssessmentDetailedResponse)
def get_assessment_details(jid: str, aid: str, db: Session = Depends(get_db)):
    """Get assessment details.

    404s when the assessment does not exist or belongs to a different job;
    otherwise returns the assessment with its JSON questions decoded and a
    questions_count added.
    """
    logger.info(f"Retrieving assessment details for job ID: {jid}, assessment ID: {aid}")
    assessment = get_assessment(db, aid)
    if not assessment or assessment.job_id != jid:
        logger.warning(f"Assessment not found for job ID: {jid}, assessment ID: {aid}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assessment not found for this job"
        )

    # BUGFIX: copy the public attributes instead of mutating the ORM
    # instance's __dict__ in place — the original overwrote the tracked
    # `questions` attribute with a Python list (dirtying the session) and
    # forwarded SQLAlchemy's _sa_instance_state to the response model.
    assessment_dict = {
        key: value
        for key, value in vars(assessment).items()
        if not key.startswith('_')
    }
    assessment_dict['questions'] = json.loads(assessment.questions) if assessment.questions else []
    assessment_dict['questions_count'] = len(assessment_dict['questions'])

    logger.info(f"Successfully retrieved assessment details for job ID: {jid}, assessment ID: {assessment.id}")
    return AssessmentDetailedResponse(**assessment_dict)
71
+
72
@router.post("/jobs/{id}", response_model=dict)  # Returns just id as per requirements
def create_new_assessment(id: str, assessment: AssessmentCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Create a new assessment for a job (HR only); returns the new id."""
    logger.info(f"Creating new assessment for job ID: {id}, title: {assessment.title} by user: {current_user.id}")

    # Guard: creating assessments is an HR-only action.
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to create assessment by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can create assessments",
        )

    created = create_assessment(db, id, assessment)
    logger.info(f"Successfully created assessment with ID: {created.id} for job ID: {id}")
    return {"id": created.id}
86
+
87
@router.patch("/jobs/{jid}/{aid}/regenerate")
def regenerate_existing_assessment(jid: str, aid: str, regenerate_data: AssessmentRegenerate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Regenerate an assessment (HR only).

    BUGFIX: this endpoint function was previously named
    `regenerate_assessment`, which shadowed the service function of the same
    name imported at the top of this module — the call below therefore
    resolved to the route function itself (with mismatched arguments) and
    crashed at runtime. Renaming the endpoint restores the service call and
    matches the sibling endpoints (update_existing_assessment /
    delete_existing_assessment). The HTTP route is unchanged.
    """
    logger.info(f"Regenerating assessment for job ID: {jid}, assessment ID: {aid} by user: {current_user.id}")
    # Only HR users can regenerate assessments
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to regenerate assessment by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can regenerate assessments"
        )
    updated_assessment = regenerate_assessment(db, aid, **regenerate_data.model_dump(exclude_unset=True))
    if not updated_assessment:
        logger.warning(f"Assessment not found for regeneration with job ID: {jid}, assessment ID: {aid}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assessment not found"
        )
    logger.info(f"Successfully regenerated assessment with ID: {updated_assessment.id} for job ID: {jid}")
    return {}
107
+
108
@router.patch("/jobs/{jid}/{aid}")
def update_existing_assessment(jid: str, aid: str, assessment_update: AssessmentUpdate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Update an existing assessment (HR only).

    Only fields explicitly set on the request payload are forwarded to the
    service (exclude_unset), so a PATCH does not clobber omitted fields.
    """
    logger.info(f"Updating assessment for job ID: {jid}, assessment ID: {aid} by user: {current_user.id}")
    # Only HR users can update assessments
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to update assessment by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can update assessments"
        )
    # Use model_dump() for consistency with the regenerate endpoint above —
    # .dict() is the deprecated pydantic-v1 spelling of the same call.
    updated_assessment = update_assessment(db, aid, **assessment_update.model_dump(exclude_unset=True))
    if not updated_assessment:
        logger.warning(f"Assessment not found for update with job ID: {jid}, assessment ID: {aid}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assessment not found"
        )
    logger.info(f"Successfully updated assessment with ID: {updated_assessment.id} for job ID: {jid}")
    return {}
128
+
129
@router.delete("/jobs/{jid}/{aid}")
def delete_existing_assessment(jid: str, aid: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Delete an assessment (HR only); 404 when it does not exist."""
    logger.info(f"Deleting assessment for job ID: {jid}, assessment ID: {aid} by user: {current_user.id}")

    # Guard: deleting assessments is an HR-only action.
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to delete assessment by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can delete assessments"
        )

    if not delete_assessment(db, aid):
        logger.warning(f"Assessment not found for deletion with job ID: {jid}, assessment ID: {aid}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Assessment not found"
        )

    logger.info(f"Successfully deleted assessment with ID: {aid} for job ID: {jid}")
    return {}
backend/api/job_routes.py ADDED
@@ -0,0 +1,127 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+ from sqlalchemy.orm import Session
3
+ from typing import List
4
+ import json
5
+
6
+ from database.database import get_db
7
+ from schemas import JobCreate, JobUpdate, JobResponse, JobListResponse
8
+ from services import create_job, get_job, get_active_jobs, update_job, delete_job, get_job_applicants_count
9
+ from utils.dependencies import get_current_user
10
+ from models.user import User
11
+ from logging_config import get_logger
12
+
13
+ # Create logger for this module
14
+ logger = get_logger(__name__)
15
+
16
+ router = APIRouter(prefix="/jobs", tags=["jobs"])
17
+
18
@router.get("", response_model=JobListResponse)
def get_jobs_list(page: int = 1, limit: int = 10, db: Session = Depends(get_db)):
    """Get a paginated list of active jobs.

    Args:
        page: 1-based page number.
        limit: Page size.
        db: Database session (injected).

    Returns:
        JobListResponse with the page of jobs, each enriched with a decoded
        `skill_categories` list and an `applicants_count`.
    """
    logger.info(f"Retrieving jobs list - page: {page}, limit: {limit}")
    skip = (page - 1) * limit
    jobs = get_active_jobs(db, skip=skip, limit=limit)

    # Calculate total count.
    # NOTE(review): fetching up to 1000 rows just to count them is a demo
    # shortcut — a COUNT(*) service call would be preferable.
    total = len(get_active_jobs(db, skip=0, limit=1000))

    job_responses = []
    for job in jobs:
        # Copy the ORM row's column values instead of mutating job.__dict__
        # in place: the original wrote a Python list into the mapped
        # `skill_categories` attribute, dirtying the session with a value of
        # the wrong type.
        job_dict = {k: v for k, v in vars(job).items() if not k.startswith("_")}
        job_dict['skill_categories'] = json.loads(job.skill_categories) if job.skill_categories else []
        job_dict['applicants_count'] = get_job_applicants_count(db, job.id)
        job_responses.append(JobResponse(**job_dict))

    logger.info(f"Successfully retrieved {len(jobs)} jobs out of total {total}")
    return JobListResponse(
        count=len(jobs),
        total=total,
        data=job_responses
    )
47
+
48
@router.get("/{id}", response_model=JobResponse)
def get_job_details(id: str, db: Session = Depends(get_db)):
    """Get job details by ID.

    Raises:
        HTTPException: 404 if no job exists for `id`.
    """
    logger.info(f"Retrieving job details for ID: {id}")
    job = get_job(db, id)
    if not job:
        logger.warning(f"Job not found for ID: {id}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Job not found"
        )

    # Copy the ORM row's column values rather than mutating job.__dict__:
    # assigning a decoded list onto the mapped `skill_categories` attribute
    # would dirty the session with a non-string value.
    job_dict = {k: v for k, v in vars(job).items() if not k.startswith("_")}
    job_dict['skill_categories'] = json.loads(job.skill_categories) if job.skill_categories else []
    job_dict['applicants_count'] = get_job_applicants_count(db, job.id)

    logger.info(f"Successfully retrieved job details for ID: {job.id}")
    return JobResponse(**job_dict)
71
+
72
@router.post("", response_model=dict)  # Returns just id as per requirements
def create_new_job(job: JobCreate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Create a job posting (HR-only) and return the new job's id."""
    logger.info(f"Creating new job with title: {job.title} by user: {current_user.id}")
    # Authorization guard: job creation is restricted to HR users.
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to create job by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can create jobs"
        )
    created = create_job(db, job)
    logger.info(f"Successfully created job with ID: {created.id}")
    return {"id": created.id}
86
+
87
@router.patch("/{id}")
def update_existing_job(id: str, job_update: JobUpdate, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Apply a partial update to a job (HR-only). Empty body on success."""
    logger.info(f"Updating job with ID: {id} by user: {current_user.id}")
    # Authorization guard: updates are restricted to HR users.
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to update job by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can update jobs"
        )
    # Forward only the fields the client explicitly set.
    changes = job_update.dict(exclude_unset=True)
    updated = update_job(db, id, **changes)
    if updated is None:
        logger.warning(f"Job not found for update with ID: {id}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Job not found"
        )
    logger.info(f"Successfully updated job with ID: {updated.id}")
    return {}
107
+
108
@router.delete("/{id}")
def delete_existing_job(id: str, db: Session = Depends(get_db), current_user: User = Depends(get_current_user)):
    """Delete a job (HR-only). Empty body on success, 404 when missing."""
    logger.info(f"Deleting job with ID: {id} by user: {current_user.id}")
    # Authorization guard: deletion is restricted to HR users.
    if current_user.role != "hr":
        logger.warning(f"Unauthorized attempt to delete job by user: {current_user.id} with role: {current_user.role}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="Only HR users can delete jobs"
        )
    if not delete_job(db, id):
        logger.warning(f"Job not found for deletion with ID: {id}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="Job not found"
        )
    logger.info(f"Successfully deleted job with ID: {id}")
    return {}
backend/api/routes.py ADDED
@@ -0,0 +1,29 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException
2
+ from sqlalchemy.orm import Session
3
+
4
+ from database.database import get_db
5
+
6
+ router = APIRouter()
7
+
8
+ # Health check endpoint
9
@router.get("/", response_model=dict)
def read_root():
    """Root endpoint returning a static welcome message."""
    return {"message": "Welcome to AI-Powered Hiring Assessment Platform API"}
13
+
14
@router.get("/health", status_code=200)
def health_check(db: Session = Depends(get_db)):
    """Health check endpoint to verify API is running and database is accessible.

    Returns:
        Status payload with a live UTC timestamp on success.

    Raises:
        HTTPException: 500 when the database probe fails.
    """
    # Local imports keep this fix self-contained in the handler.
    from datetime import datetime, timezone

    from sqlalchemy import text

    try:
        # SQLAlchemy 2.x rejects bare SQL strings in Session.execute();
        # textual SQL must be wrapped in text().
        db.execute(text("SELECT 1"))
        return {
            "status": "healthy",
            "database": "connected",
            # Real current time instead of the previous hard-coded placeholder.
            "timestamp": datetime.now(timezone.utc).isoformat(),
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Health check failed: {str(e)}")
backend/api/user_routes.py ADDED
@@ -0,0 +1,76 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import APIRouter, Depends, HTTPException, status
2
+ from sqlalchemy.orm import Session
3
+ import logging
4
+
5
+ from database.database import get_db
6
+ from schemas import UserCreate, UserLogin, UserLogout, UserResponse, TokenResponse
7
+ from services import get_user, login_user_service, register_user_service
8
+ from utils.dependencies import get_current_user
9
+ from models.user import User
10
+ from logging_config import get_logger
11
+
12
+ # Create logger for this module
13
+ logger = get_logger(__name__)
14
+
15
+ router = APIRouter(prefix="/users", tags=["users"])
16
+
17
+ # Registration endpoints
18
@router.post("/registration/signup", response_model=TokenResponse)
def register_user_endpoint(user: UserCreate, db: Session = Depends(get_db)):
    """Register a new user and return an access token for the account."""
    logger.info(f"Registering new user with email: {user.email}")
    # Persistence and token generation are delegated to the auth service.
    return register_user_service(db, user)
26
+
27
@router.post("/registration/login", response_model=TokenResponse)
def login_user_endpoint(credentials: UserLogin, db: Session = Depends(get_db)):
    """Authenticate a user and return an access token."""
    logger.info(f"Login attempt for user: {credentials.email}")
    # Credential verification and token generation live in the auth service.
    return login_user_service(db, credentials)
35
+
36
@router.post("/registration/logout")
def logout_user(credentials: UserLogout, db: Session = Depends(get_db)):
    """Logout a user.

    Token invalidation is not implemented yet; the endpoint currently just
    acknowledges the request with an empty body.
    """
    logger.info("User logout request")
    return {}
43
+
44
+ # User endpoints
45
@router.get("/me", response_model=UserResponse)
def get_current_user_data(current_user: User = Depends(get_current_user)):
    """Return the authenticated user's own record, resolved from the token."""
    logger.info(f"Retrieving current user details for ID: {current_user.id}")
    logger.info(f"Successfully retrieved current user details for ID: {current_user.id}")
    return current_user
53
+
54
@router.get("/{id}", response_model=UserResponse)
def get_user_details(id: str, current_user: User = Depends(get_current_user), db: Session = Depends(get_db)):
    """Fetch a user by ID; non-HR callers may only fetch their own record."""
    logger.info(f"Retrieving user details for ID: {id} by user: {current_user.id}")

    is_self = current_user.id == id
    is_hr = current_user.role == "hr"
    if not (is_self or is_hr):
        logger.warning(f"Unauthorized attempt to access user details by user: {current_user.id} for user: {id}")
        raise HTTPException(
            status_code=status.HTTP_403_FORBIDDEN,
            detail="You can only access your own user details"
        )

    user = get_user(db, id)
    if user is None:
        logger.warning(f"User not found for ID: {id}")
        raise HTTPException(
            status_code=status.HTTP_404_NOT_FOUND,
            detail="User not found"
        )

    logger.info(f"Successfully retrieved user details for ID: {user.id}")
    return user
backend/config.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pydantic_settings import BaseSettings
from typing import Optional
import os
from dotenv import load_dotenv

# Load environment variables from .env file so they are visible both to
# BaseSettings and to any direct os.environ reads elsewhere.
load_dotenv()

class Settings(BaseSettings):
    """Application configuration; every field is overridable via environment
    variables or the .env file (names matched case-insensitively)."""

    # Database Configuration
    database_url: str = "sqlite:///./assessment_platform.db"

    # Server Configuration
    host: str = "0.0.0.0"
    port: int = 8000
    debug: bool = False

    # Logging Configuration
    log_level: str = "INFO"
    log_file: str = "app.log"
    log_format: str = "%(asctime)s - %(name)s - %(levelname)s - %(message)s"

    # JWT Configuration (for future use)
    # NOTE(review): this default is a placeholder — a real SECRET_KEY must be
    # supplied via the environment in any non-local deployment.
    secret_key: str = "your-secret-key-here"
    algorithm: str = "HS256"
    access_token_expire_minutes: int = 30

    # Application Configuration
    app_name: str = "AI-Powered Hiring Assessment Platform"
    app_version: str = "0.1.0"
    app_description: str = "MVP for managing hiring assessments using AI"

    class Config:
        # Read overrides from .env; match env var names case-insensitively.
        env_file = ".env"
        case_sensitive = False

# Create a single instance of settings shared by the whole application.
settings = Settings()
backend/database/__init__.py ADDED
File without changes
backend/database/database.py ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
from config import settings
from logging_config import get_logger

# Create logger for this module
logger = get_logger(__name__)

# `check_same_thread=False` is a SQLite-specific DBAPI flag (needed because
# FastAPI may service a request on a different thread from the one that
# created the connection). Other DBAPIs reject the argument, so only pass it
# for SQLite URLs.
_connect_args = {"check_same_thread": False} if settings.database_url.startswith("sqlite") else {}

# Database setup using SQLAlchemy
engine = create_engine(settings.database_url, connect_args=_connect_args)
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)

Base = declarative_base()


def get_db():
    """FastAPI dependency that yields a database session and always closes it."""
    logger.debug("Creating database session")
    db = SessionLocal()
    try:
        yield db
    finally:
        logger.debug("Closing database session")
        db.close()
backend/docker-compose.yml ADDED
@@ -0,0 +1,33 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Compose definition for the backend API service.
version: '3.8'

services:
  backend:
    build: .
    ports:
      - "8000:8000"
    volumes:
      # Bind-mount the source tree so code changes are visible in the container.
      - .:/app
      - app-static-files:/app/static
      - ./logs:/app/logs
    environment:
      - DATABASE_URL=sqlite:///./assessment_platform.db
      - HOST=0.0.0.0
      - PORT=8000
      - DEBUG=True
      - LOG_LEVEL=INFO
      - LOG_FILE=app.log
      # NOTE(review): placeholder secret — override with a real value before
      # deploying anywhere non-local.
      - SECRET_KEY=your-secret-key-here
      - ALGORITHM=HS256
      - ACCESS_TOKEN_EXPIRE_MINUTES=30
      - APP_NAME=AI-Powered Hiring Assessment Platform
      - APP_VERSION=0.1.0
      - APP_DESCRIPTION=MVP for managing hiring assessments using AI
    networks:
      - app-network

volumes:
  # Named volume keeps generated static files out of the bind mount.
  app-static-files:

networks:
  app-network:
    driver: bridge
backend/integrations/ai_integration/ai_factory.py ADDED
@@ -0,0 +1,78 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+ from typing import Dict, Type
3
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
4
+ from integrations.ai_integration.mock_ai_generator import MockAIGenerator
5
+ from integrations.ai_integration.openai_generator import OpenAIGenerator
6
+ from integrations.ai_integration.anthropic_generator import AnthropicGenerator
7
+ from integrations.ai_integration.google_ai_generator import GoogleAIGenerator
8
+
9
+
10
class AIProvider(Enum):
    """Supported AI backends, keyed by their configuration string values."""

    MOCK = "mock"
    OPENAI = "openai"
    ANTHROPIC = "anthropic"
    GOOGLE = "google"
18
+
19
+
20
class AIGeneratorFactory:
    """
    Registry-backed factory for AI generator instances.

    New providers are supported by registering an implementation class
    against an AIProvider value; no existing code needs to change.
    """

    _providers: Dict[AIProvider, Type[AIGeneratorInterface]] = {}

    @classmethod
    def register_provider(cls, provider: AIProvider, generator_class: Type[AIGeneratorInterface]):
        """
        Associate `provider` with `generator_class` in the registry.

        Args:
            provider: The AI provider enum value
            generator_class: The class that implements AIGeneratorInterface
        """
        cls._providers[provider] = generator_class

    @classmethod
    def create_generator(cls, provider: AIProvider) -> AIGeneratorInterface:
        """
        Instantiate the generator registered for `provider`.

        Args:
            provider: The AI provider to instantiate

        Returns:
            An instance of the requested AI generator

        Raises:
            ValueError: If the provider is not registered
        """
        try:
            generator_class = cls._providers[provider]
        except KeyError:
            raise ValueError(f"AI provider {provider} is not registered")
        return generator_class()

    @classmethod
    def get_available_providers(cls) -> list:
        """
        Return the providers currently registered with the factory.
        """
        return list(cls._providers)
68
+
69
+
70
# Register all available providers
AIGeneratorFactory.register_provider(AIProvider.MOCK, MockAIGenerator)
AIGeneratorFactory.register_provider(AIProvider.OPENAI, OpenAIGenerator)
AIGeneratorFactory.register_provider(AIProvider.ANTHROPIC, AnthropicGenerator)
AIGeneratorFactory.register_provider(AIProvider.GOOGLE, GoogleAIGenerator)


# Optional: Create a default provider
# NOTE(review): the Anthropic and Google generators raise NotImplementedError;
# presumably only MOCK is functional — confirm before changing this default.
DEFAULT_PROVIDER = AIProvider.MOCK
backend/integrations/ai_integration/ai_generator_interface.py ADDED
@@ -0,0 +1,57 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from abc import ABC, abstractmethod
2
+ from typing import List, Dict, Any
3
+ from schemas.assessment import AssessmentQuestion
4
+
5
+
6
class AIGeneratorInterface(ABC):
    """
    Interface for AI question generators.

    Defines the contract that all AI providers (mock, OpenAI, Anthropic,
    Google) must implement: question generation and answer scoring.
    """

    @abstractmethod
    def generate_questions(
        self,
        title: str,
        questions_types: List[str],
        additional_note: str = None,
        job_info: Dict[str, Any] = None
    ) -> List[AssessmentQuestion]:
        """
        Generate questions based on the assessment title, job information, and specified question types.

        Args:
            title: The title of the assessment
            questions_types: List of question types to generate
                (choose_one, choose_many, text_based); one question is
                produced per entry
            additional_note: Additional information to guide question generation
            job_info: Information about the job the assessment is for

        Returns:
            List of generated AssessmentQuestion objects
        """
        pass

    @abstractmethod
    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: List[str] = None
    ) -> Dict[str, Any]:
        """
        Score an answer based on the question and the provided answer.

        Args:
            question: The question being answered
            answer_text: The text of the answer (for text-based questions)
            selected_options: Selected options (for multiple choice questions)

        Returns:
            Dictionary containing score information:
            {
                'score': float,   # Score between 0 and 1
                'rationale': str, # Explanation of the score
                'correct': bool   # Whether the answer is correct
            }
        """
        pass
backend/integrations/ai_integration/anthropic_generator.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict, Any
2
+ from schemas.assessment import AssessmentQuestion
3
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
4
+
5
+
6
class AnthropicGenerator(AIGeneratorInterface):
    """
    Placeholder Anthropic-backed generator.

    Both interface methods raise NotImplementedError until the Anthropic API
    integration is written.
    """

    def generate_questions(
        self,
        title: str,
        questions_types: List[str],
        additional_note: str = None,
        job_info: Dict[str, Any] = None
    ) -> List[AssessmentQuestion]:
        """Not implemented: will generate questions via the Anthropic API."""
        raise NotImplementedError("Anthropic integration not yet implemented")

    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: List[str] = None
    ) -> Dict[str, Any]:
        """Not implemented: will score answers via the Anthropic API."""
        raise NotImplementedError("Anthropic answer scoring not yet implemented")
backend/integrations/ai_integration/google_ai_generator.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict, Any
2
+ from schemas.assessment import AssessmentQuestion
3
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
4
+
5
+
6
class GoogleAIGenerator(AIGeneratorInterface):
    """
    Placeholder Google-AI-backed generator.

    Both interface methods raise NotImplementedError until the Google AI API
    integration is written.
    """

    def generate_questions(
        self,
        title: str,
        questions_types: List[str],
        additional_note: str = None,
        job_info: Dict[str, Any] = None
    ) -> List[AssessmentQuestion]:
        """Not implemented: will generate questions via the Google AI API."""
        raise NotImplementedError("Google AI integration not yet implemented")

    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: List[str] = None
    ) -> Dict[str, Any]:
        """Not implemented: will score answers via the Google AI API."""
        raise NotImplementedError("Google AI answer scoring not yet implemented")
backend/integrations/ai_integration/mock_ai_generator.py ADDED
@@ -0,0 +1,378 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import random
2
+ import uuid
3
+ from typing import List, Dict, Any
4
+ from schemas.assessment import AssessmentQuestion, AssessmentQuestionOption
5
+ from schemas.enums import QuestionType
6
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
7
+
8
+
9
+ class MockAIGenerator(AIGeneratorInterface):
10
+ """
11
+ Mock AI Generator implementation for testing purposes.
12
+ Generates questions based on predefined templates and job information.
13
+ """
14
+
15
+ def generate_questions(
16
+ self,
17
+ title: str,
18
+ questions_types: List[str],
19
+ additional_note: str = None,
20
+ job_info: Dict[str, Any] = None
21
+ ) -> List[AssessmentQuestion]:
22
+ """
23
+ Generate questions using mock AI logic based on job information.
24
+ """
25
+ num_questions = len(questions_types)
26
+ generated_questions = []
27
+
28
+ for i, q_type in enumerate(questions_types):
29
+ # Create a question ID
30
+ question_id = str(uuid.uuid4())
31
+
32
+ # Generate question text based on the assessment title, job info and question type
33
+ question_text = self._generate_question_text(title, q_type, i+1, additional_note, job_info)
34
+
35
+ # Determine weight (random between 1-5)
36
+ weight = random.randint(1, 5)
37
+
38
+ # Generate skill categories based on the assessment title and job info
39
+ skill_categories = self._generate_skill_categories(title, job_info)
40
+
41
+ # Generate options and correct options based on the question type
42
+ options = []
43
+ correct_options = []
44
+
45
+ if q_type in [QuestionType.choose_one.value, QuestionType.choose_many.value]:
46
+ options = self._generate_multiple_choice_options(q_type, question_text)
47
+ correct_options = self._select_correct_options(options, q_type)
48
+
49
+ # Create the AssessmentQuestion object
50
+ question = AssessmentQuestion(
51
+ id=question_id,
52
+ text=question_text,
53
+ weight=weight,
54
+ skill_categories=skill_categories,
55
+ type=QuestionType(q_type),
56
+ options=options,
57
+ correct_options=correct_options
58
+ )
59
+
60
+ generated_questions.append(question)
61
+
62
+ return generated_questions
63
+
64
+ def _generate_question_text(self, title: str, q_type: str, question_number: int, additional_note: str = None, job_info: Dict[str, Any] = None) -> str:
65
+ """Generate a question text based on the assessment title, job info and question type."""
66
+ # Normalize the title to lowercase for processing
67
+ normalized_title = title.lower()
68
+
69
+ # Use job information if available to enhance question relevance
70
+ job_title = job_info.get('title', '') if job_info else ''
71
+ job_description = job_info.get('description', '') if job_info else ''
72
+ job_seniority = job_info.get('seniority', '') if job_info else ''
73
+ job_skills = job_info.get('skill_categories', []) if job_info else []
74
+
75
+ # Create a base question depending on the assessment title and job information
76
+ if "python" in normalized_title or "programming" in normalized_title or "python" in job_title.lower() or "programming" in job_title.lower():
77
+ base_questions = [
78
+ f"What is the correct way to declare a variable in {title}?",
79
+ f"How would you implement a function to solve a problem in {title}?",
80
+ f"Which of the following is a characteristic of {title}?",
81
+ f"What is the time complexity of this operation in {title}?",
82
+ f"In {title}, what is the purpose of this code snippet?",
83
+ f"What is the output of this {title} code?",
84
+ f"Which {title} concept is best suited for this scenario?",
85
+ f"What is the main advantage of using {title} in this context?"
86
+ ]
87
+ elif "software" in normalized_title or "engineer" in normalized_title or "software" in job_title.lower() or "engineer" in job_title.lower():
88
+ base_questions = [
89
+ f"What is the most efficient approach to design a system for {title}?",
90
+ f"Which software development principle applies to {title}?",
91
+ f"How would you optimize the performance of a {title} application?",
92
+ f"What is the best practice for error handling in {title}?",
93
+ f"Which testing methodology is most appropriate for {title}?",
94
+ f"What architectural pattern would you recommend for {title}?",
95
+ f"How would you ensure scalability in {title}?",
96
+ f"What security consideration is important for {title}?"
97
+ ]
98
+ elif "data" in normalized_title or "analysis" in normalized_title or "data" in job_title.lower() or "analysis" in job_title.lower():
99
+ base_questions = [
100
+ f"How would you clean and preprocess data for {title}?",
101
+ f"Which statistical method is appropriate for {title}?",
102
+ f"What visualization technique best represents {title}?",
103
+ f"How would you handle missing values in {title}?",
104
+ f"What is the correlation between variables in {title}?",
105
+ f"Which machine learning model is suitable for {title}?",
106
+ f"How would you validate the results of {title}?",
107
+ f"What ethical consideration applies to {title}?"
108
+ ]
109
+ elif job_skills: # If job has specific skill categories, use them to generate relevant questions
110
+ # Join the skills to form a context
111
+ skills_context = ", ".join(job_skills)
112
+ base_questions = [
113
+ f"How would you apply {skills_context} skills in this {title} role?",
114
+ f"What challenges might you face using {skills_context} in this position?",
115
+ f"Which {skills_context} techniques are most relevant for this {title}?",
116
+ f"How would you leverage your {skills_context} experience in this role?",
117
+ f"What {skills_context} methodologies would you use for this {title}?",
118
+ f"How do {skills_context} skills contribute to success in this position?",
119
+ f"What {skills_context} tools would be most effective for this {title}?",
120
+ f"How would you apply {skills_context} best practices in this role?"
121
+ ]
122
+ else:
123
+ # Generic questions if title doesn't match known patterns
124
+ base_questions = [
125
+ f"What is the fundamental concept behind {title}?",
126
+ f"How would you approach solving a problem in {title}?",
127
+ f"What are the key characteristics of {title}?",
128
+ f"What is the main purpose of {title}?",
129
+ f"Which principle governs {title}?",
130
+ f"How does {title} differ from similar concepts?",
131
+ f"What are the advantages of using {title}?",
132
+ f"What limitations should be considered in {title}?"
133
+ ]
134
+
135
+ # Select a question based on the question number to ensure variety
136
+ question_index = (question_number * 7) % len(base_questions) # Use prime number to create variation
137
+ question_text = base_questions[question_index]
138
+
139
+ # Add context from additional note if provided
140
+ if additional_note:
141
+ question_text += f" ({additional_note})"
142
+
143
+ # Add context from job description if available
144
+ if job_description:
145
+ question_text += f" Consider the following job description: {job_description[:100]}..." # Truncate to avoid overly long questions
146
+
147
+ return question_text
148
+
149
+ def _generate_skill_categories(self, title: str, job_info: Dict[str, Any] = None) -> List[str]:
150
+ """Generate skill categories based on the assessment title and job information."""
151
+ normalized_title = title.lower()
152
+ categories = ["general"]
153
+
154
+ # Use job information if available to enhance category relevance
155
+ job_title = job_info.get('title', '') if job_info else ''
156
+ job_seniority = job_info.get('seniority', '') if job_info else ''
157
+ job_skills = job_info.get('skill_categories', []) if job_info else []
158
+
159
+ # Combine title and job title for broader matching
160
+ combined_title = f"{title} {job_title}".lower()
161
+
162
+ if "python" in combined_title:
163
+ categories.extend(["python", "programming", "backend"])
164
+ elif "javascript" in combined_title or "js" in combined_title:
165
+ categories.extend(["javascript", "programming", "frontend"])
166
+ elif "react" in combined_title:
167
+ categories.extend(["react", "javascript", "frontend"])
168
+ elif "data" in combined_title or "analysis" in combined_title:
169
+ categories.extend(["data-analysis", "statistics", "visualization"])
170
+ elif "machine learning" in combined_title or "ml" in combined_title:
171
+ categories.extend(["machine-learning", "algorithms", "data-science"])
172
+ elif "devops" in combined_title:
173
+ categories.extend(["devops", "ci/cd", "infrastructure"])
174
+ elif "security" in combined_title:
175
+ categories.extend(["security", "cybersecurity", "vulnerability"])
176
+ elif "software" in combined_title or "engineer" in combined_title:
177
+ categories.extend(["software-engineering", "design-patterns", "algorithms"])
178
+
179
+ # Add job-specific skills if available
180
+ if job_skills:
181
+ categories.extend(job_skills)
182
+
183
+ # Add seniority-specific categories
184
+ if job_seniority:
185
+ if job_seniority == "intern":
186
+ categories.extend(["learning", "basic-concepts", "mentoring"])
187
+ elif job_seniority == "junior":
188
+ categories.extend(["development", "coding", "implementation"])
189
+ elif job_seniority == "mid":
190
+ categories.extend(["problem-solving", "architecture", "teamwork"])
191
+ elif job_seniority == "senior":
192
+ categories.extend(["leadership", "architecture", "decision-making"])
193
+
194
+ # Add a few more generic categories
195
+ categories.extend(["problem-solving", "critical-thinking"])
196
+
197
+ # Limit to 5 categories max to prevent overly long lists
198
+ return list(set(categories))[:5]
199
+
200
+ def _generate_multiple_choice_options(self, q_type: str, question_text: str) -> List[AssessmentQuestionOption]:
201
+ """Generate multiple choice options for a question."""
202
+ options = []
203
+
204
+ # Generate 3-5 options depending on the question
205
+ num_options = random.randint(3, 5)
206
+
207
+ for i in range(num_options):
208
+ option_letter = chr(ord('a') + i) # 'a', 'b', 'c', etc.
209
+
210
+ # Create option text based on the question
211
+ if "python" in question_text.lower():
212
+ option_texts = [
213
+ f"Option {option_letter}: This approach uses Python's built-in functions",
214
+ f"Option {option_letter}: This solution involves a custom class implementation",
215
+ f"Option {option_letter}: This method leverages external libraries",
216
+ f"Option {option_letter}: This technique uses recursion",
217
+ f"Option {option_letter}: This algorithm has O(n) time complexity",
218
+ f"Option {option_letter}: This pattern follows Python best practices"
219
+ ]
220
+ elif "software" in question_text.lower() or "design" in question_text.lower():
221
+ option_texts = [
222
+ f"Option {option_letter}: This follows the singleton pattern",
223
+ f"Option {option_letter}: This implements the observer pattern",
224
+ f"Option {option_letter}: This uses the factory method",
225
+ f"Option {option_letter}: This applies the decorator pattern",
226
+ f"Option {option_letter}: This utilizes microservices architecture",
227
+ f"Option {option_letter}: This employs event-driven design"
228
+ ]
229
+ else:
230
+ option_texts = [
231
+ f"Option {option_letter}: This is the correct approach",
232
+ f"Option {option_letter}: This is an alternative method",
233
+ f"Option {option_letter}: This is a common misconception",
234
+ f"Option {option_letter}: This relates to advanced concepts",
235
+ f"Option {option_letter}: This is a basic implementation",
236
+ f"Option {option_letter}: This is an outdated approach"
237
+ ]
238
+
239
+ # Select an option text based on the option index
240
+ option_index = (i * 11) % len(option_texts) # Use prime number for variation
241
+ option_text = option_texts[option_index]
242
+
243
+ option = AssessmentQuestionOption(
244
+ text=option_text,
245
+ value=option_letter
246
+ )
247
+
248
+ options.append(option)
249
+
250
+ return options
251
+
252
+ def _select_correct_options(self, options: List[AssessmentQuestionOption], q_type: str) -> List[str]:
253
+ """Select the correct options for a question."""
254
+ if not options:
255
+ return []
256
+
257
+ # For 'choose_one', select exactly one correct option
258
+ if q_type == QuestionType.choose_one.value:
259
+ # Randomly select one option as correct (index 0 to len(options)-1)
260
+ correct_index = random.randint(0, len(options) - 1)
261
+ return [options[correct_index].value]
262
+
263
+ # For 'choose_many', select 1-2 correct options
264
+ elif q_type == QuestionType.choose_many.value:
265
+ # Randomly decide how many correct options (1 or 2)
266
+ num_correct = random.randint(1, min(2, len(options)))
267
+
268
+ # Randomly select indices for correct options
269
+ correct_indices = random.sample(range(len(options)), num_correct)
270
+
271
+ # Return the values of the selected correct options
272
+ return [options[i].value for i in correct_indices]
273
+
274
+ # For other types, return empty list
275
+ return []
276
+
277
    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: List[str] = None
    ) -> Dict[str, Any]:
        """
        Score an answer based on the question and the provided answer.

        Multiple-choice answers are graded deterministically against the
        stored key; text answers go through the mock heuristic evaluator.

        Args:
            question: The question being answered
            answer_text: The text of the answer (for text-based questions)
            selected_options: Selected options (for multiple choice questions)

        Returns:
            Dictionary containing score information:
            {
                'score': float,   # Score between 0 and 1
                'rationale': str, # Explanation of the score
                'correct': bool   # Whether the answer is correct
            }
        """
        # For multiple choice questions - score directly without AI
        if question.type in [QuestionType.choose_one, QuestionType.choose_many]:
            if selected_options is None:
                selected_options = []

            # All-or-nothing, order-insensitive comparison against the key.
            correct = set(selected_options) == set(question.correct_options)

            if correct:
                score = 1.0
                rationale = f"The selected options {selected_options} match the correct options {question.correct_options}."
            else:
                score = 0.0
                rationale = f"The selected options {selected_options} do not match the correct options {question.correct_options}."

            return {
                'score': score,
                'rationale': rationale,
                'correct': correct
            }

        # For text-based questions - this is where AI evaluation would happen
        elif question.type == QuestionType.text_based:
            # For mock implementation, we'll give a score based on whether text is provided
            if answer_text and answer_text.strip():
                # In a real implementation, this would use AI to evaluate the quality of the answer
                # For now, we'll simulate a more nuanced scoring
                # Consider factors like length, keywords related to the question, etc.
                score = self._evaluate_text_answer(answer_text, question.text)
                rationale = f"The text answer was evaluated with score {score}."
            else:
                score = 0.0
                rationale = "No answer was provided."

            return {
                'score': score,
                'rationale': rationale,
                'correct': score > 0.5  # Consider correct if score > 0.5
            }

        # Default case: unknown question type cannot be scored.
        return {
            'score': 0.0,
            'rationale': "Unable to score this type of question.",
            'correct': False
        }
345
+
346
+ def _evaluate_text_answer(self, answer_text: str, question_text: str) -> float:
347
+ """
348
+ Evaluate a text-based answer (simulated AI evaluation).
349
+ In a real implementation, this would call an AI service to evaluate the answer quality.
350
+
351
+ Args:
352
+ answer_text: The text of the answer provided by the user
353
+ question_text: The text of the question being answered
354
+
355
+ Returns:
356
+ Score between 0 and 1
357
+ """
358
+ # Simple heuristics for mock evaluation
359
+ score = 0.0
360
+
361
+ # Check if answer is substantial (not just a few words)
362
+ if len(answer_text.split()) >= 5: # At least 5 words
363
+ score += 0.3
364
+
365
+ # Check if answer contains relevant keywords from the question
366
+ question_keywords = set(question_text.lower().split())
367
+ answer_words = set(answer_text.lower().split())
368
+ common_words = question_keywords.intersection(answer_words)
369
+
370
+ if len(common_words) > 0:
371
+ score += 0.2 # Bonus for mentioning relevant terms
372
+
373
+ # Additional bonus for longer, more detailed answers
374
+ if len(answer_text) > 100:
375
+ score += 0.2
376
+
377
+ # Cap the score at 1.0
378
+ return min(score, 1.0)
backend/integrations/ai_integration/openai_generator.py ADDED
@@ -0,0 +1,39 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List, Dict, Any
2
+ from schemas.assessment import AssessmentQuestion
3
+ from integrations.ai_integration.ai_generator_interface import AIGeneratorInterface
4
+
5
+
6
class OpenAIGenerator(AIGeneratorInterface):
    """Placeholder OpenAI-backed generator.

    Both operations raise ``NotImplementedError`` until the real OpenAI API
    integration lands; the class exists so a concrete provider type can
    already be referenced and selected.
    """

    def generate_questions(
        self,
        title: str,
        questions_types: List[str],
        additional_note: str = None,
        job_info: Dict[str, Any] = None
    ) -> List[AssessmentQuestion]:
        """Stub: would ask the OpenAI API to author assessment questions."""
        raise NotImplementedError("OpenAI integration not yet implemented")

    def score_answer(
        self,
        question: AssessmentQuestion,
        answer_text: str,
        selected_options: List[str] = None
    ) -> Dict[str, Any]:
        """Stub: would ask the OpenAI API to grade a candidate answer."""
        raise NotImplementedError("OpenAI answer scoring not yet implemented")
backend/logging_config.py ADDED
@@ -0,0 +1,37 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import logging
2
+ import logging.config
3
+ from config import settings
4
+ import os
5
+
6
def setup_logging():
    """Configure application-wide logging from settings.

    Creates the log directory on demand, then installs a file handler plus
    a console handler at the configured level and format.

    Returns:
        The logger for this module, which immediately announces the
        active log level.
    """
    # Make sure the directory that will hold the log file exists.
    target_dir = os.path.dirname(settings.log_file)
    if target_dir and not os.path.exists(target_dir):
        os.makedirs(target_dir)

    # Root configuration: file output plus a console mirror, same format.
    logging.basicConfig(
        level=getattr(logging, settings.log_level.upper()),
        format=settings.log_format,
        handlers=[
            logging.FileHandler(settings.log_file),
            logging.StreamHandler(),  # also log to console
        ],
    )

    app_logger = logging.getLogger(__name__)
    app_logger.info(f"Logging initialized with level: {settings.log_level}")

    return app_logger
28
+
29
# Initialize the application's default logger once, at import time.
logger = setup_logging()
31
+
32
def get_logger(name: str = None):
    """Return a named logger, or the application's default logger.

    Args:
        name: Optional logger name (typically the caller's ``__name__``).

    Returns:
        ``logging.getLogger(name)`` when a name is given, otherwise the
        module-level application logger.
    """
    return logging.getLogger(name) if name else logger
backend/main.py ADDED
@@ -0,0 +1,54 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from contextlib import asynccontextmanager
2
+ from fastapi import FastAPI
3
+ import os
4
+
5
+ # Import from our modules
6
+ from models import Base
7
+ from database.database import engine
8
+ from api.routes import router as root_router
9
+ from api.user_routes import router as user_router
10
+ from api.job_routes import router as job_router
11
+ from api.assessment_routes import router as assessment_router
12
+ from api.application_routes import router as application_router
13
+ from config import settings
14
+ from logging_config import get_logger
15
+
16
+ # Create logger for this module
17
+ logger = get_logger(__name__)
18
+
19
@asynccontextmanager
async def lifespan(app: FastAPI):
    """FastAPI lifespan hook: log startup details, yield, then log shutdown."""
    # Startup
    logger.info(f"Starting {settings.app_name} v{settings.app_version}")
    # NOTE(review): logging the full database URL may leak credentials — confirm it is safe here.
    logger.info(f"Database URL: {settings.database_url}")
    logger.info("Application started successfully")
    yield
    # Shutdown
    logger.info("Application shutting down")
29
+
30
# Initialize FastAPI app with settings; metadata feeds the generated OpenAPI docs.
app = FastAPI(
    title=settings.app_name,
    description=settings.app_description,
    version=settings.app_version,
    lifespan=lifespan
)

# Include API routes — one router per resource (users, jobs, assessments, applications).
app.include_router(root_router)
app.include_router(user_router)
app.include_router(job_router)
app.include_router(assessment_router)
app.include_router(application_router)

logger.info("Application routes registered")
46
+
47
+ if __name__ == "__main__":
48
+ import uvicorn
49
+ logger.info(f"Starting server on {settings.host}:{settings.port}")
50
+ uvicorn.run(
51
+ app,
52
+ host=settings.host,
53
+ port=settings.port,
54
+ )
backend/models/__init__.py ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ from .base import Base
2
+ from .user import User
3
+ from .job import Job
4
+ from .assessment import Assessment
5
+ from .application import Application
6
+
7
+ __all__ = ["Base", "User", "Job", "Assessment", "Application"]
backend/models/application.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sqlalchemy import Column, String, Text, ForeignKey
2
+ from .base import Base
3
+ import uuid
4
+
5
class Application(Base):
    """ORM model for a candidate's submission to a job's assessment."""

    __tablename__ = "applications"

    # Surrogate UUID primary key, stored as a string.
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()), index=True)
    job_id = Column(String, ForeignKey("jobs.id"), nullable=False)
    assessment_id = Column(String, ForeignKey("assessments.id"), nullable=False)
    user_id = Column(String, ForeignKey("users.id"), nullable=False)
    answers = Column(Text)  # Stored as JSON string
backend/models/assessment.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sqlalchemy import Column, String, Integer, Boolean, Text, ForeignKey, CheckConstraint
2
+ from .base import Base
3
+ import uuid
4
+ import json
5
+ from pydantic import ValidationError
6
+ from typing import Dict, Any
7
+
8
class Assessment(Base):
    """ORM model for a job's assessment: metadata plus a JSON-encoded question bank."""

    __tablename__ = "assessments"

    # Surrogate UUID primary key, stored as a string.
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()), index=True)
    job_id = Column(String, ForeignKey("jobs.id"), nullable=False)
    title = Column(String, nullable=False)
    duration = Column(Integer)  # in seconds
    passing_score = Column(Integer)  # range 20-80
    questions = Column(Text)  # Stored as JSON string
    active = Column(Boolean, default=True)

    # Add constraint to ensure passing_score is in range 20-80 (DB-level guard).
    __table_args__ = (
        CheckConstraint(passing_score >= 20, name='passing_score_min'),
        CheckConstraint(passing_score <= 80, name='passing_score_max'),
    )

    def validate_questions(self) -> bool:
        """Validate the questions JSON structure.

        Returns True when ``questions`` is empty/None or parses to a list of
        well-formed question dicts; False on bad JSON or a malformed entry.
        """
        try:
            if self.questions:
                parsed_questions = json.loads(self.questions)
                if not isinstance(parsed_questions, list):
                    return False

                # Validate each question
                for question in parsed_questions:
                    if not self._validate_single_question(question):
                        return False
                return True
            # An absent/empty payload counts as valid.
            return True
        except (json.JSONDecodeError, TypeError):
            return False

    def _validate_single_question(self, question: Dict[str, Any]) -> bool:
        """Validate a single question structure"""
        # Every question needs these keys; extra keys are tolerated.
        required_fields = {'id', 'text', 'weight', 'skill_categories', 'type'}
        if not all(field in question for field in required_fields):
            return False

        # Validate weight is in range 1-5
        # NOTE(review): isinstance(True, int) is True in Python, so boolean
        # weights would pass this check — confirm whether that matters.
        if not isinstance(question['weight'], int) or question['weight'] < 1 or question['weight'] > 5:
            return False

        # Validate skill_categories is a list
        if not isinstance(question['skill_categories'], list):
            return False

        # Validate type is one of the allowed types
        allowed_types = {'choose_one', 'choose_many', 'text_based'}
        if question['type'] not in allowed_types:
            return False

        return True
backend/models/base.py ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
from sqlalchemy.ext.declarative import declarative_base

# Shared declarative base for all ORM models in the package.
# NOTE(review): SQLAlchemy 1.4+ exposes this as sqlalchemy.orm.declarative_base —
# confirm the pinned version before migrating the import.
Base = declarative_base()
backend/models/job.py ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sqlalchemy import Column, String, Boolean, Text, CheckConstraint
2
+ from .base import Base
3
+ import uuid
4
+ import json
5
+ from typing import List, Optional
6
+
7
class Job(Base):
    """ORM model for an open position that assessments attach to."""

    __tablename__ = "jobs"

    # Surrogate UUID primary key, stored as a string.
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()), index=True)
    title = Column(String, nullable=False)
    seniority = Column(String, nullable=False)  # intern, junior, mid, senior
    description = Column(Text)
    skill_categories = Column(String)  # Stored as JSON string
    active = Column(Boolean, default=True)

    # Add constraint to ensure seniority is valid (DB-level guard).
    __table_args__ = (CheckConstraint(seniority.in_(['intern', 'junior', 'mid', 'senior']), name='valid_seniority'),)

    def validate_skill_categories(self) -> bool:
        """Validate the skill_categories JSON structure.

        Returns True when the column is empty/None or parses to a list of
        strings; False on bad JSON or any non-string entry.
        """
        try:
            if self.skill_categories:
                parsed_categories = json.loads(self.skill_categories)
                if not isinstance(parsed_categories, list):
                    return False
                # Validate that all items in the list are strings
                return all(isinstance(cat, str) for cat in parsed_categories)
            return True
        except (json.JSONDecodeError, TypeError):
            return False
backend/models/user.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from sqlalchemy import Column, String, CheckConstraint
2
+ from .base import Base
3
+ import uuid
4
+ from utils.password_utils import get_password_hash, verify_password
5
+ import re
6
+
7
class User(Base):
    """ORM model for a platform account (HR or applicant)."""

    __tablename__ = "users"

    # Surrogate UUID primary key, stored as a string.
    id = Column(String, primary_key=True, default=lambda: str(uuid.uuid4()), index=True)
    first_name = Column(String, nullable=False)
    last_name = Column(String, nullable=False)
    email = Column(String, unique=True, index=True, nullable=False)
    password = Column(String, nullable=False)  # stores the hash produced by set_password
    role = Column(String, nullable=False)  # 'hr' or 'applicant'

    # Add constraint to ensure role is either 'hr' or 'applicant' (DB-level guard).
    __table_args__ = (CheckConstraint(role.in_(['hr', 'applicant']), name='valid_role'),)

    def set_password(self, password: str):
        """Hash and set the user's password"""
        self.password = get_password_hash(password)

    def check_password(self, password: str) -> bool:
        """Check if the provided password matches the stored hash"""
        return verify_password(password, self.password)

    def validate_name(self, name: str) -> bool:
        """Validate that the name contains only letters, spaces, hyphens, and apostrophes"""
        if not name:
            return False
        # Allow letters, spaces, hyphens, and apostrophes, with length between 1 and 50.
        # NOTE(review): ASCII-only — non-Latin names will be rejected; confirm intended.
        pattern = r"^[a-zA-Z\s\-']{1,50}$"
        return bool(re.match(pattern, name.strip()))

    def validate_first_name(self) -> bool:
        """Validate the first name"""
        return self.validate_name(self.first_name)

    def validate_last_name(self) -> bool:
        """Validate the last name"""
        return self.validate_name(self.last_name)
backend/requirements-dev.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ pytest>=7.0.0
2
+ pytest-cov>=4.0.0
3
+ httpx>=0.23.0
4
+ pytest-asyncio>=0.20.0
5
+ factory-boy>=3.2.0
6
+ Faker>=13.0.0
backend/requirements-test.txt ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ # Testing requirements
2
+ pytest>=7.0.0
3
+ pytest-cov>=4.0.0
4
+ pytest-mock>=3.10.0
5
+ factory-boy>=3.2.0
6
+ faker>=18.0.0
backend/requirements.md ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ 1. System Requirements — MVP for AI-Powered Hiring Assessment Platform
2
+ 1.1 Overview
3
+ A platform for managing hiring assessments using AI, serving two primary user types: 1. HR (Human Resources) 2. Candidate
4
+ Goal: Enable HR to create smart assessments, manage them, and review results easily, while allowing candidates to take assessments and review their results.
5
+ 1.2 Primary Users
6
+ 1.2.1 HR
7
+ A user responsible for creating and managing assessments, questions, and reviewing candidate results.
8
+ 1.2.2 Candidate
9
+ A user who takes assessments and reviews their previous results.
10
+ 2. Functional Requirements — HR
11
+ 2.1 Authentication and Account Management
12
+ • HR login using email and password.
13
+ 2.2 Job Roles and Assessment Management
14
+ • Create a new assessment.
15
+ • Enter assessment information:
16
+ 1. Job Title
17
+ 2. Experience Level
18
+ 3. Required Skills
19
+ 4. Job Description
20
+ 5. Number of Questions
21
+ 6. Assessment Description (optional) to be displayed to the candidate
22
+ 2.3 Creating Questions Using AI
23
+ • Create questions through AI chat based on job information.
24
+ • Save questions in a question bank linked to the assessment.
25
+ • Edit questions manually.
26
+ • Edit a question using AI (rewording, difficulty adjustment, clarity improvement).
27
+ 2.4 Managing Question Properties
28
+ • Mark a question as:
29
+ 1. Failing Question / Knockout
30
+ • Add tags to questions to be used as evaluation criteria (e.g., Python, Communication, SQL).
31
+ • Set weight or importance for each question.
32
+ 2.5 Assessment Evaluation and Review
33
+ • Display assessment results in a table containing:
34
+ 1. Candidate Name
35
+ 2. Assessment Date
36
+ 3. Time Taken to Complete
37
+ 4. Final Numeric Score
38
+ 5. Pass/Fail status based on:
39
+  Required level
40
+  Knockout questions
41
+ 6. Candidate score per tag (Tag-based Scores)
42
+ • Review a specific candidate’s answers for each question.
43
+ • Display a summary view:
44
+ 1. Average Scores
45
+ 2. Pass Rate
46
+ 3. Key Strengths and Weaknesses (from AI)
47
+ • Set a score/rating for each question manually or with AI assistance.
48
+ 2.6 Retrieving and Managing Assessments
49
+ • Retrieve specific assessment information (questions, settings, results).
50
+ • HR can edit any question and use AI for assistance.
51
+ 2.7 Sharing Assessments
52
+ • Generate a unique link for the assessment.
53
+ • Enable/disable the assessment link.
54
+ 3. Functional Requirements — Candidate
55
+ 3.1 Authentication and Account Management
56
+ • Candidate login (email + password).
57
+ 3.2 Taking the Assessment
58
+ • Access the assessment via link.
59
+ • Display assessment instructions.
60
+ • Answer the questions.
61
+ • Submit the assessment upon completion.
62
+ 3.3 Reviewing Results
63
+ • Display previous assessment results.
64
+ • Show overall score.
65
+ • Display general feedback (optional).
66
+ 4. Evaluation
67
+ • Each question has a maximum score.
68
+ • Questions can be marked as Knockout.
69
+ • For Knockout questions:
70
+ 1. Candidate must achieve ≥ 50% of the question score to pass the assessment.
71
+ • Final score = sum of all question scores.
72
+ 4.1 AI Evaluation
73
+ • The system sends candidate answers to AI for evaluation.
74
+ • AI returns:
75
+ 1. Suggested score for each question.
76
+ 2. Brief rationale/feedback (optional in MVP).
77
+ • HR can:
78
+ 1. Modify any question score.
79
+ 2. Accept or ignore AI evaluation.
80
+ 4.2 Language
81
+ • The system interface and questions are in English only.
82
+ 4.3 Single HR
83
+ • Only one HR exists (no multi-company or multi-HR support in MVP).
84
+ 5. MVP Core Requirements
85
+ 5.1 Question Types
86
+ • Text Answer questions
87
+ • True / False questions
88
+ • Multiple Choice questions
89
+ 5.2 Evaluation
90
+ • Support manual evaluation by HR.
91
+ • Support AI evaluation suggestion (final decision by HR).
92
+ 5.3 Permissions
93
+ • HR can only see assessments they created.
94
+ • Candidate can only see their own assessments.
95
+ 6. MVP Assumptions
96
+ • No multiple HR roles (each HR is independent).
97
+ • No company/team system in the first version.
98
+ • No advanced anti-cheating mechanisms (camera or tracking).
backend/requirements.txt ADDED
Binary file (1.58 kB). View file
 
backend/run_tests.py ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ """
2
+ Test Runner for AI-Powered Hiring Assessment Platform
3
+ This script runs all tests in the test suite.
4
+ """
5
+
6
+ import unittest
7
+ import sys
8
+ import os
9
+
10
+ # Add the backend directory to the path so imports work
11
+ sys.path.insert(0, os.path.join(os.path.dirname(__file__), '.'))
12
+
13
def run_tests():
    """Discover and execute every test module under ``tests/``.

    Returns:
        True when the whole suite passed, False otherwise.
    """
    print("Running comprehensive test suite for AI-Powered Hiring Assessment Platform...")

    # Collect all test_*.py modules from the tests directory and run them.
    suite = unittest.TestLoader().discover('tests', pattern='test_*.py')
    outcome = unittest.TextTestRunner(verbosity=2).run(suite)

    # Summarise the run for quick CLI inspection.
    print(f"\nTests run: {outcome.testsRun}")
    print(f"Failures: {len(outcome.failures)}")
    print(f"Errors: {len(outcome.errors)}")
    print(f"Success: {outcome.wasSuccessful()}")

    return outcome.wasSuccessful()
32
+
33
+
34
# CLI entry point: exit code 0 on a green suite, 1 otherwise (CI-friendly).
if __name__ == '__main__':
    success = run_tests()
    sys.exit(0 if success else 1)
backend/schemas/__init__.py ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from .user import UserBase, UserCreate, UserUpdate, UserResponse, UserLogin, UserLogout, TokenResponse
2
+ from .job import JobBase, JobCreate, JobUpdate, JobResponse, JobListResponse
3
+ from .assessment import AssessmentBase, AssessmentCreate, AssessmentUpdate, AssessmentResponse, AssessmentListResponse, AssessmentDetailedResponse, AssessmentRegenerate
4
+ from .application import ApplicationBase, ApplicationCreate, ApplicationUpdate, ApplicationResponse, ApplicationListResponse, ApplicationDetailedResponse, ApplicationDetailedListResponse
5
+
6
+ __all__ = [
7
+ "UserBase", "UserCreate", "UserUpdate", "UserResponse", "UserLogin", "UserLogout", "TokenResponse",
8
+ "JobBase", "JobCreate", "JobUpdate", "JobResponse", "JobListResponse",
9
+ "AssessmentBase", "AssessmentCreate", "AssessmentUpdate", "AssessmentResponse", "AssessmentListResponse", "AssessmentDetailedResponse", "AssessmentRegenerate",
10
+ "ApplicationBase", "ApplicationCreate", "ApplicationUpdate", "ApplicationResponse", "ApplicationListResponse", "ApplicationDetailedResponse", "ApplicationDetailedListResponse"
11
+ ]
backend/schemas/application.py ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, List
2
+ from pydantic import BaseModel, Field
3
+ from .base import BaseSchema
4
+ from .enums import QuestionType
5
+
6
class ApplicationAnswer(BaseModel):
    """A candidate's answer to one assessment question."""
    question_id: str = Field(..., min_length=1)
    # Free-text answer (text_based questions); None for choice questions.
    text: Optional[str] = Field(None, max_length=5000)
    # Selected option values (choice questions).
    options: Optional[List[str]] = []
10
+
11
class ApplicationUser(BaseModel):
    """Minimal user identity embedded in detailed application responses."""
    id: str
    first_name: str
    last_name: str
    email: str
16
+
17
class ApplicationQuestion(BaseModel):
    """Question snapshot carried alongside an application's answers."""
    id: str = Field(..., min_length=1)
    text: str = Field(..., min_length=1, max_length=1000)
    weight: int = Field(..., ge=1, le=5)  # range 1-5
    skill_categories: List[str] = Field(..., min_items=1)
    type: QuestionType
    options: Optional[List[dict]] = []  # Using dict for simplicity
    correct_options: Optional[List[str]] = []
25
+
26
class ApplicationAnswerWithQuestion(ApplicationAnswer):
    """An answer enriched with its question's details and a grading rationale."""
    question_text: str = Field(..., min_length=1, max_length=1000)
    weight: int = Field(..., ge=1, le=5)  # range 1-5
    skill_categories: List[str] = Field(..., min_items=1)
    type: QuestionType
    options: Optional[List[dict]] = []
    correct_options: Optional[List[str]] = []
    # Explanation of how this answer was scored.
    rationale: str = Field(..., min_length=1, max_length=1000)
34
+
35
class ApplicationBase(BaseSchema):
    """Common application fields shared by create/response schemas."""
    job_id: str = Field(..., min_length=1)
    assessment_id: str = Field(..., min_length=1)
    user_id: str = Field(..., min_length=1)
    answers: List[ApplicationAnswer] = Field(..., min_items=1)
40
+
41
class ApplicationCreate(ApplicationBase):
    """Payload for submitting a new application; identical to the base fields."""
    pass
43
+
44
class ApplicationUpdate(BaseModel):
    """Partial update payload; only the answers may change."""
    answers: Optional[List[ApplicationAnswer]] = Field(None, min_items=1)
46
+
47
class ApplicationResponse(ApplicationBase):
    """API response shape for an application, with scoring fields when available."""
    id: str
    score: Optional[float] = None
    passing_score: Optional[float] = None

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
54
+
55
class ApplicationDetailedResponse(ApplicationResponse):
    """Response variant embedding the user and question-enriched answers."""
    user: ApplicationUser
    answers: List[ApplicationAnswerWithQuestion]
58
+
59
class ApplicationListResponse(BaseModel):
    """Paginated envelope for application summaries."""
    count: int  # items in this page
    total: int  # total matching applications
    data: List[ApplicationResponse]
63
+
64
class ApplicationDetailedListResponse(BaseModel):
    """Paginated envelope whose items carry full user/answer detail.

    Mirrors ``ApplicationListResponse`` but with detailed rows, matching
    the ``Detailed`` naming convention used elsewhere in the schemas.
    """
    count: int  # items in this page
    total: int  # total matching applications
    # Bug fix: was List[ApplicationResponse], which silently dropped the
    # user/answer detail this "Detailed" variant exists to expose.
    data: List[ApplicationDetailedResponse]
backend/schemas/assessment.py ADDED
@@ -0,0 +1,56 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, List
2
+ from pydantic import BaseModel, Field
3
+ from .base import BaseSchema
4
+ from .enums import QuestionType
5
+
6
class AssessmentQuestionOption(BaseModel):
    """A single selectable option for a choice question."""
    text: str  # display text shown to the candidate
    value: str  # stable identifier referenced by correct_options
9
+
10
class AssessmentQuestion(BaseModel):
    """One assessment question, including its grading key for choice types."""
    id: str
    text: str
    weight: int = Field(..., ge=1, le=5)  # range 1-5
    skill_categories: List[str]
    type: QuestionType
    options: Optional[List[AssessmentQuestionOption]] = []
    # Option values considered correct; empty for text_based questions.
    correct_options: Optional[List[str]] = []
18
+
19
class AssessmentBase(BaseSchema):
    """Common assessment fields shared by response schemas."""
    title: str = Field(..., min_length=1, max_length=200)
    duration: Optional[int] = Field(None, ge=1)  # Duration in seconds, if provided should be positive
    passing_score: int = Field(..., ge=20, le=80)  # range 20-80
    questions: Optional[List[AssessmentQuestion]] = []
    active: bool = True
25
+
26
class AssessmentCreate(BaseModel):
    """Payload for creating an assessment; questions are generated, not supplied."""
    title: str = Field(..., min_length=1, max_length=200)
    passing_score: int = Field(..., ge=20, le=80)  # range 20-80
    questions_types: List[QuestionType]  # array of enum(choose_one, choose_many, text_based)
    additional_note: Optional[str] = Field(None, max_length=500)
31
+
32
class AssessmentUpdate(BaseModel):
    """Partial update payload; every field is optional."""
    title: Optional[str] = Field(None, min_length=1, max_length=200)
    duration: Optional[int] = Field(None, ge=1)  # Duration in seconds, if provided should be positive
    passing_score: Optional[int] = Field(None, ge=20, le=80)  # range 20-80
    questions: Optional[List[AssessmentQuestion]] = None
    active: Optional[bool] = None
38
+
39
class AssessmentRegenerate(BaseModel):
    """Payload for regenerating an assessment's questions."""
    questions_types: Optional[List[QuestionType]] = None  # array of enum(choose_one, choose_many, text_based)
    additional_note: Optional[str] = Field(None, max_length=500)
42
+
43
class AssessmentResponse(AssessmentBase):
    """API response shape for an assessment summary."""
    id: str
    questions_count: int = 0

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
49
+
50
class AssessmentListResponse(BaseModel):
    """Paginated envelope for assessment summaries."""
    count: int  # items in this page
    total: int  # total matching assessments
    data: List[AssessmentResponse]
54
+
55
class AssessmentDetailedResponse(AssessmentResponse):
    """Response variant that includes the full question list."""
    questions: List[AssessmentQuestion]
backend/schemas/base.py ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ from pydantic import BaseModel
2
+
3
class BaseSchema(BaseModel):
    """Base for all schemas that may be built from ORM objects."""
    class Config:
        from_attributes = True
backend/schemas/candidate_assessment.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from pydantic import BaseModel
3
+ from datetime import datetime
4
+ from .base import BaseSchema
5
+
6
class CandidateAssessmentBase(BaseSchema):
    """Common fields for a candidate's assessment attempt.

    NOTE(review): ids here are ``int`` while the ORM models use string
    UUIDs — confirm whether this schema is still in use.
    """
    candidate_id: int
    assessment_id: int
    status: str = 'not_started'  # 'not_started', 'in_progress', 'completed'
    total_score: Optional[int] = None
11
+
12
class CandidateAssessmentCreate(CandidateAssessmentBase):
    """Creation payload; restates the required foreign keys."""
    candidate_id: int
    assessment_id: int
15
+
16
class CandidateAssessmentUpdate(BaseModel):
    """Partial update payload for an attempt's progress and score."""
    status: Optional[str] = None
    total_score: Optional[int] = None
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None
21
+
22
class CandidateAssessmentInDB(CandidateAssessmentBase):
    """Persisted attempt, as read back from the database."""
    id: int
    started_at: Optional[datetime] = None
    completed_at: Optional[datetime] = None

    class Config:
        from_attributes = True
backend/schemas/enums.py ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from enum import Enum
2
+
3
class UserRole(str, Enum):
    """Roles a platform account can hold."""
    hr = "hr"
    applicant = "applicant"
6
+
7
class JobSeniority(str, Enum):
    """Experience levels a job posting can target."""
    intern = "intern"
    junior = "junior"
    mid = "mid"
    senior = "senior"
12
+
13
class QuestionType(str, Enum):
    """Supported assessment question formats."""
    choose_one = "choose_one"    # single correct option
    choose_many = "choose_many"  # one or more correct options
    text_based = "text_based"    # free-text answer
17
+
18
class SortByOptions(str, Enum):
    """Sort keys accepted by list endpoints."""
    min = "min"
    max = "max"
    created_at = "created_at"
backend/schemas/job.py ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional, List
2
+ from pydantic import BaseModel, Field
3
+ from .base import BaseSchema
4
+ from .enums import JobSeniority
5
+
6
class JobBase(BaseSchema):
    """Common job-posting fields shared by create/response schemas."""
    title: str = Field(..., min_length=1, max_length=200)
    seniority: JobSeniority
    description: Optional[str] = Field(None, max_length=1000)
    skill_categories: Optional[List[str]] = []
    active: bool = True
12
+
13
class JobCreate(JobBase):
    """Creation payload; re-declares the core fields (same constraints as the base)."""
    title: str = Field(..., min_length=1, max_length=200)
    seniority: JobSeniority
    description: Optional[str] = Field(None, max_length=1000)
17
+
18
class JobUpdate(BaseModel):
    """Partial update payload; every field is optional."""
    title: Optional[str] = Field(None, min_length=1, max_length=200)
    seniority: Optional[JobSeniority] = None
    description: Optional[str] = Field(None, max_length=1000)
    skill_categories: Optional[List[str]] = None
    active: Optional[bool] = None
24
+
25
class JobResponse(JobBase):
    """API response shape for a job, with an applicant tally."""
    id: str
    applicants_count: int = 0

    class Config:
        # Allow construction directly from ORM objects.
        from_attributes = True
31
+
32
class JobListResponse(BaseModel):
    """Paginated envelope for job summaries."""
    count: int  # items in this page
    total: int  # total matching jobs
    data: List[JobResponse]
backend/schemas/question.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import Optional
2
+ from pydantic import BaseModel
3
+ from datetime import datetime
4
+ from .base import BaseSchema
5
+
6
class QuestionBase(BaseSchema):
    """Common question fields for the legacy question schema set.

    NOTE(review): uses int ids and type strings that differ from
    ``schemas.enums.QuestionType`` — confirm whether this module is still in use.
    """
    assessment_id: int
    question_text: str
    question_type: str  # 'text', 'true_false', 'multiple_choice'
    is_knockout: bool = False
    weight: int = 1
    max_score: int = 10
13
+
14
class QuestionCreate(QuestionBase):
    """Creation payload; restates the required text/type fields."""
    question_text: str
    question_type: str
17
+
18
class QuestionUpdate(BaseModel):
    """Partial update payload; every field is optional."""
    question_text: Optional[str] = None
    question_type: Optional[str] = None
    is_knockout: Optional[bool] = None
    weight: Optional[int] = None
    max_score: Optional[int] = None
24
+
25
class QuestionInDB(QuestionBase):
    """Persisted question, as read back from the database."""
    id: int
    created_at: datetime

    class Config:
        from_attributes = True