Fayza38 commited on
Commit
b7be4fe
·
verified ·
1 Parent(s): 29ba8ac

Upload 2 files

Browse files
Files changed (2) hide show
  1. main.py +161 -0
  2. requirements.txt +9 -0
main.py ADDED
@@ -0,0 +1,161 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fastapi import FastAPI, HTTPException
2
+ from pydantic import BaseModel
3
+ from transformers import AutoModelForCausalLM, AutoTokenizer
4
+ from TextToSpeech import text_to_speech
5
+ import torch
6
+ import base64
7
+
8
# =========================================
# ENUM MAPPINGS (Match Backend Enums)
# =========================================

# Integer codes sent by the backend, mapped to internal session labels.
SESSION_TYPES = {
    1: "technical",
    2: "softskills"
}

# Backend track IDs this service supports; only id 19 is wired up so far.
TRACKS = {
    19: "generalprogramming"
}

# =========================================
# LOAD MODEL ONCE (Global)
# =========================================

# Hugging Face Hub repo id of the fine-tuned question/answer model.
MODEL_PATH = "Fayza38/Question_and_Answer"

tokenizer = AutoTokenizer.from_pretrained(MODEL_PATH)

# Loaded once at import time so every request reuses the same weights.
# float32 + explicit CPU placement: no GPU is assumed in the deployment
# environment — TODO confirm this matches the target host.
model = AutoModelForCausalLM.from_pretrained(
    MODEL_PATH,
    torch_dtype=torch.float32,
    device_map="cpu"
)

app = FastAPI()
36
+
37
+
38
+ # =========================================
39
+ # REQUEST MODEL
40
+ # =========================================
41
+
42
class QuestionRequest(BaseModel):
    """Request payload for POST /generate-questions.

    Fields carry backend enum codes, not display strings: ``sessionType``
    is looked up in SESSION_TYPES and ``trackName`` in TRACKS.
    """

    sessionType: int  # 1 = technical, 2 = softskills (see SESSION_TYPES)
    difficultyLevel: int | None = None  # optional; endpoint falls back to 1
    trackName: int  # backend track id (e.g. 19 = general programming)
46
+
47
+
48
+ # =========================================
49
+ # HELPER: GENERATE TEXT USING QWEN TEMPLATE
50
+ # =========================================
51
+
52
def generate_from_model(prompt: str) -> str:
    """Run the chat model on *prompt* and return only the generated text.

    Wraps the prompt in the model's chat template (system + user turns),
    generates up to 1200 new tokens, and decodes ONLY the newly generated
    tokens. The original decoded the full sequence, so the prompt — which
    itself contains "Q: ... / A: ..." format examples — leaked into the
    output and could be mis-parsed as a real Q/A pair downstream.

    Returns the decoded completion as a plain string.
    """
    messages = [
        {"role": "system", "content": "You are a professional interview question generator."},
        {"role": "user", "content": prompt}
    ]

    formatted_prompt = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )

    inputs = tokenizer(formatted_prompt, return_tensors="pt")

    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=1200,
            # do_sample is required for temperature to take effect; without
            # it transformers decodes greedily and silently ignores the
            # temperature setting (it only emits a warning).
            do_sample=True,
            temperature=0.7
        )

    # Skip the prompt tokens; decode only what the model appended.
    prompt_len = inputs["input_ids"].shape[1]
    decoded = tokenizer.decode(outputs[0][prompt_len:], skip_special_tokens=True)

    return decoded
77
+
78
+
79
+ # =========================================
80
+ # PARSE Q/A FORMAT
81
+ # =========================================
82
+
83
def parse_qa_blocks(text: str) -> list[tuple[str, str]]:
    """Parse model output into (question, answer) pairs.

    Expects blank-line-separated blocks, each formatted as::

        Q: <question text>
        A: <answer text>

    Blocks missing either marker are skipped. Returns an empty list when
    nothing matches, which the endpoint treats as a generation failure.
    """
    results = []

    for block in text.split("\n\n"):
        if "Q:" in block and "A:" in block:
            # maxsplit=1: an answer that itself contains "A:" must not be
            # truncated — the original unbounded split kept only the text
            # between the first and second "A:" markers.
            question_part, answer_part = block.split("A:", 1)
            question = question_part.replace("Q:", "").strip()
            answer = answer_part.strip()
            results.append((question, answer))

    return results
96
+
97
+
98
+
99
+
100
+ # =========================================
101
+ # MAIN ENDPOINT
102
+ # =========================================
103
+
104
@app.post("/generate-questions")
def generate_questions(request: QuestionRequest):
    """Generate up to 10 interview Q/A pairs for the requested session.

    Builds a session-specific prompt, runs the model once, parses the
    output, and returns a list of dicts shaped for the backend:
    ``{"questionText", "questionId", "questionIdealAnswer"}``.

    Raises HTTP 400 for an unknown session type or unsupported track and
    HTTP 500 when the model output yields no parseable Q/A pairs.
    """
    if request.sessionType not in SESSION_TYPES:
        raise HTTPException(status_code=400, detail="Invalid session type")

    session_type = SESSION_TYPES[request.sessionType]

    # ---- Build the prompt for the selected session kind ----
    if session_type == "softskills":
        prompt = """
Generate 10 behavioral interview questions.
Format exactly as:
Q: ...
A: ...
"""
    elif session_type == "technical":
        if request.trackName not in TRACKS:
            raise HTTPException(status_code=400, detail="Track not supported")

        # Missing/zero difficulty falls back to level 1.
        difficulty = request.difficultyLevel or 1

        prompt = f"""
Generate 10 General Programming interview questions.
Difficulty level: {difficulty}
Format exactly as:
Q: ...
A: ...
"""
    else:
        # Defensive: unreachable while SESSION_TYPES only maps the two
        # kinds handled above, but kept so new entries fail loudly.
        raise HTTPException(status_code=400, detail="Invalid session type")

    # ---- Single model call, then parse into (question, answer) pairs ----
    qa_pairs = parse_qa_blocks(generate_from_model(prompt))

    if not qa_pairs:
        raise HTTPException(status_code=500, detail="Model failed to generate valid Q/A format")

    # Cap at 10 pairs and shape each one for the backend contract.
    return [
        {
            "questionText": question,
            "questionId": idx,
            "questionIdealAnswer": answer,
        }
        for idx, (question, answer) in enumerate(qa_pairs[:10], 1)
    ]
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ fastapi==0.110.0
2
+ uvicorn==0.29.0
3
+ torch==2.2.2
4
+ transformers==4.40.1
5
+ accelerate==0.29.3
6
+ peft==0.10.0
7
+ pydantic==2.6.4
8
+ soundfile
9
+