aki-008 committed on
Commit
e1145b3
·
1 Parent(s): 694009d

feat: quiz endpoint

Browse files
.gitignore CHANGED
@@ -217,3 +217,6 @@ __marimo__/
217
 
218
  playground/psql_driver.ipynb
219
  Backend/Time complexity cheatsheet.pdf
 
 
 
 
217
 
218
  playground/psql_driver.ipynb
219
  Backend/Time complexity cheatsheet.pdf
220
+ test/1000-data-science-questions-answers.json
221
+
222
+ test/test.ipynb
Backend/app/api/v1/endpoints/quiz.py CHANGED
@@ -5,25 +5,20 @@ from typing import List
5
  from app.schema import StudentCreate, StudentUpdate, StudentResponse
6
  from app.models import Student, User
7
  from app.api.deps import get_db, get_current_user
 
8
 
9
  router = APIRouter()
10
 
11
- @router.get("/", response_model=List[StudentResponse])
12
- async def get_all_students(
13
- skip: int = 0,
14
- limit: int = 100,
15
- db: AsyncSession = Depends(get_db),
16
- current_user: User = Depends(get_current_user)
17
- ):
18
- """Get all students with pagination (Protected)"""
19
- try:
20
- result = await db.execute(
21
- select(Student).offset(skip).limit(limit)
22
- )
23
- students = result.scalars().all()
24
- return students
25
- except Exception as e:
26
- raise HTTPException(
27
- status_code=status.HTTP_500_INTERNAL_SERVER_ERROR,
28
- detail=f"Failed to fetch students: {str(e)}"
29
- )
 
5
  from app.schema import StudentCreate, StudentUpdate, StudentResponse
6
  from app.models import Student, User
7
  from app.api.deps import get_db, get_current_user
8
+ from app.schema import Quiz_input
9
 
10
  router = APIRouter()
11
 
12
+ # @router.post("/", response_model=StudentResponse, status_code=status.HTTP_201_CREATED)
13
+ # async def generate_quiz(
14
+ # Input_model: Quiz_input, db: AsyncSession = Depends(get_db),
15
+ # current_user: User = Depends(get_current_user)):
16
+
17
+ # try:
18
+ # if Input_model.parsed_doc and Input_model.user_prompt and Input_model.choice:
19
+
20
+
21
+
22
+ #--------Helper Functions--------#
23
+
24
+ def prompt_builder()
 
 
 
 
 
 
Backend/app/models/tables.py CHANGED
@@ -19,3 +19,4 @@ class User(Base):
19
  username: Mapped[str] = mapped_column(String(50), unique=True, index=True)
20
  email: Mapped[str] = mapped_column(String(100), unique=True, index=True)
21
  hashed_password: Mapped[str] = mapped_column(String(255))
 
 
19
  username: Mapped[str] = mapped_column(String(50), unique=True, index=True)
20
  email: Mapped[str] = mapped_column(String(100), unique=True, index=True)
21
  hashed_password: Mapped[str] = mapped_column(String(255))
22
+
Backend/app/schema/__init__.py CHANGED
@@ -1,3 +1,3 @@
1
- from app.schema.models import StudentCreate, StudentUpdate, StudentResponse, UserCreate, Token, LoginRequest
2
 
3
- __all__ = ["StudentCreate", "StudentUpdate", "StudentResponse", "UserCreate", "Token", "LoginRequest"]
 
1
+ from app.schema.models import StudentCreate, StudentUpdate, StudentResponse, UserCreate, Token, LoginRequest, Quiz_input
2
 
3
+ __all__ = ["StudentCreate", "StudentUpdate", "StudentResponse", "UserCreate", "Token", "LoginRequest", "Quiz_input"]
Backend/app/schema/models.py CHANGED
@@ -1,5 +1,5 @@
1
  from pydantic import BaseModel, EmailStr, Field, field_validator, ConfigDict
2
- from typing import Optional
3
  from datetime import datetime
4
 
5
  class StudentBase(BaseModel):
@@ -46,4 +46,9 @@ class Token(BaseModel):
46
  token_type: str
47
 
48
  class LoginResponse(Token):
49
- username: str
 
 
 
 
 
 
1
  from pydantic import BaseModel, EmailStr, Field, field_validator, ConfigDict
2
+ from typing import Optional, Literal
3
  from datetime import datetime
4
 
5
  class StudentBase(BaseModel):
 
46
  token_type: str
47
 
48
  class LoginResponse(Token):
49
+ username: str
50
+
51
+ class Quiz_input(BaseModel):
52
+ parsed_doc: str
53
+ user_prompt: str
54
+ choice: Literal["mcq", "code"]
test/test.ipynb CHANGED
@@ -50,6 +50,117 @@
50
  "execution_count": null,
51
  "id": "52154570",
52
  "metadata": {},
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
53
  "outputs": [],
54
  "source": []
55
  }
 
50
  "execution_count": null,
51
  "id": "52154570",
52
  "metadata": {},
53
+ "outputs": [
54
+ {
55
+ "ename": "ValidationError",
56
+ "evalue": "4 validation errors for Quiz_input\nparsed_doc\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\nuser_prompt\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\nmcq_choice\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\ncode_choice\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing",
57
+ "output_type": "error",
58
+ "traceback": [
59
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
60
+ "\u001b[31mValidationError\u001b[39m Traceback (most recent call last)",
61
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 6\u001b[39m\n\u001b[32m 3\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mlength\u001b[39m(\u001b[38;5;28minput\u001b[39m: Quiz_input):\n\u001b[32m 4\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28mlen\u001b[39m(\u001b[38;5;28minput\u001b[39m)\n\u001b[32m----> \u001b[39m\u001b[32m6\u001b[39m data = \u001b[43mQuiz_input\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 8\u001b[39m data.parsed_doc = \u001b[33m\"\u001b[39m\u001b[33mhello\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 9\u001b[39m result = length(data)\n",
62
+ "\u001b[36mFile \u001b[39m\u001b[32m~/miniforge3/envs/prep/lib/python3.13/site-packages/pydantic/main.py:250\u001b[39m, in \u001b[36mBaseModel.__init__\u001b[39m\u001b[34m(self, **data)\u001b[39m\n\u001b[32m 248\u001b[39m \u001b[38;5;66;03m# `__tracebackhide__` tells pytest and some other tools to omit this function from tracebacks\u001b[39;00m\n\u001b[32m 249\u001b[39m __tracebackhide__ = \u001b[38;5;28;01mTrue\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m250\u001b[39m validated_self = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m__pydantic_validator__\u001b[49m\u001b[43m.\u001b[49m\u001b[43mvalidate_python\u001b[49m\u001b[43m(\u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mself_instance\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m)\u001b[49m\n\u001b[32m 251\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m validated_self:\n\u001b[32m 252\u001b[39m warnings.warn(\n\u001b[32m 253\u001b[39m \u001b[33m'\u001b[39m\u001b[33mA custom validator is returning a value other than `self`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m'\u001b[39m\n\u001b[32m 254\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mReturning anything other than `self` from a top level model validator isn\u001b[39m\u001b[33m'\u001b[39m\u001b[33mt supported when validating via `__init__`.\u001b[39m\u001b[38;5;130;01m\\n\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 255\u001b[39m \u001b[33m'\u001b[39m\u001b[33mSee the `model_validator` docs (https://docs.pydantic.dev/latest/concepts/validators/#model-validators) for more details.\u001b[39m\u001b[33m'\u001b[39m,\n\u001b[32m 256\u001b[39m stacklevel=\u001b[32m2\u001b[39m,\n\u001b[32m 257\u001b[39m )\n",
63
+ "\u001b[31mValidationError\u001b[39m: 4 validation errors for Quiz_input\nparsed_doc\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\nuser_prompt\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\nmcq_choice\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing\ncode_choice\n Field required [type=missing, input_value={}, input_type=dict]\n For further information visit https://errors.pydantic.dev/2.12/v/missing"
64
+ ]
65
+ }
66
+ ],
67
+ "source": [
68
+ "from app.schema.models import Quiz_input\n",
69
+ "\n",
70
+ "def length(input: Quiz_input):\n",
71
+ " return len(input)\n",
72
+ "\n",
73
+ "data = Quiz_input()\n",
74
+ "hello = \"hello\"\n",
75
+ "data.parsed_doc = hello\n",
76
+ "result = length(data)\n",
77
+ "print(result)"
78
+ ]
79
+ },
80
+ {
81
+ "cell_type": "code",
82
+ "execution_count": 2,
83
+ "id": "f0dac635",
84
+ "metadata": {},
85
+ "outputs": [
86
+ {
87
+ "data": {
88
+ "text/plain": [
89
+ "{'parsed_doc': 'hello'}"
90
+ ]
91
+ },
92
+ "execution_count": 2,
93
+ "metadata": {},
94
+ "output_type": "execute_result"
95
+ }
96
+ ],
97
+ "source": [
98
+ "data"
99
+ ]
100
+ },
101
+ {
102
+ "cell_type": "code",
103
+ "execution_count": 2,
104
+ "id": "b964b9c3",
105
+ "metadata": {},
106
+ "outputs": [],
107
+ "source": [
108
+ "import json\n",
109
+ "\n",
110
+ "with open(\"1000-data-science-questions-answers.json\", \"r\") as f:\n",
111
+ " data = json.load(f)\n"
112
+ ]
113
+ },
114
+ {
115
+ "cell_type": "code",
116
+ "execution_count": 4,
117
+ "id": "f2579e63",
118
+ "metadata": {},
119
+ "outputs": [
120
+ {
121
+ "data": {
122
+ "text/plain": [
123
+ "500"
124
+ ]
125
+ },
126
+ "execution_count": 4,
127
+ "metadata": {},
128
+ "output_type": "execute_result"
129
+ }
130
+ ],
131
+ "source": [
132
+ "len(data)"
133
+ ]
134
+ },
135
+ {
136
+ "cell_type": "code",
137
+ "execution_count": 6,
138
+ "id": "005c0e97",
139
+ "metadata": {},
140
+ "outputs": [
141
+ {
142
+ "data": {
143
+ "text/plain": [
144
+ "{'question': 'Which of the following CLI command can also be used to rename files?',\n",
145
+ " 'options': ['rm', 'mv', 'rm -r', 'none of the mentioned'],\n",
146
+ " 'answer': 'b',\n",
147
+ " 'explanation': 'Explanation: mv stands for move.'}"
148
+ ]
149
+ },
150
+ "execution_count": 6,
151
+ "metadata": {},
152
+ "output_type": "execute_result"
153
+ }
154
+ ],
155
+ "source": [
156
+ "data[0]"
157
+ ]
158
+ },
159
+ {
160
+ "cell_type": "code",
161
+ "execution_count": null,
162
+ "id": "8222408a",
163
+ "metadata": {},
164
  "outputs": [],
165
  "source": []
166
  }