jayantp2003 commited on
Commit
078d100
·
verified ·
1 Parent(s): 7bfd799

Upload 8 files

Browse files
Files changed (8) hide show
  1. app.py +354 -0
  2. helper.py +371 -0
  3. helper2.py +122 -0
  4. pages/judge_paper.py +317 -0
  5. pages/learning_analytics.py +380 -0
  6. pages/question_paper.py +583 -0
  7. requirements.txt +10 -0
  8. resources.json +1 -0
app.py ADDED
@@ -0,0 +1,354 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import streamlit as st

# ---------------------------------------------------------------------------
# Bloomsphere landing page: static marketing/navigation page that routes to
# the generator, judge, and analytics sub-pages via st.switch_page.
# ---------------------------------------------------------------------------

# Page Configuration
st.set_page_config(
    page_title="Bloomsphere - Cognitive Learning Platform",
    page_icon="🌱",
    layout="wide",
    initial_sidebar_state="collapsed"
)

# Layout tweaks: tighten main padding, collapse Streamlit's default header.
# NOTE(review): .st-emotion-cache-oj1fi is an auto-generated Streamlit class
# name — brittle across Streamlit versions; confirm after any upgrade.
st.markdown("""
<style>
.stMainBlockContainer {
    padding: 32px 50px;
}
.stAppHeader {
    position: relative;
    height: 0;
}
.st-emotion-cache-oj1fi {
    display: flex
    ;
    justify-content: center;
    align-items: center;
    margin-top: 0px;
}
</style>
""", unsafe_allow_html=True)

# Custom CSS with Enhanced Spacing and Animations
st.markdown("""
<style>
:root {
    --primary: #2E7D32;
    --secondary: #388E3C;
    --accent: #81C784;
    --light: #E8F5E9;
    --dark: #1B5E20;
    margin: 0;
    padding: 0;
}

.main {
    background: linear-gradient(135deg, var(--light) 0%, var(--accent) 100%);
    font-family: 'Inter', sans-serif;
}

.header-container {
    text-align: center;
    padding: 4rem 0 8rem 0;
    margin-bottom: 10rem;
}

.main-title {
    color: var(--dark);
    font-size: 3.5rem;
    font-weight: 800;
    margin-bottom: 1rem;
}

.tagline {
    color: var(--secondary);
    font-size: 1.5rem;
    margin-bottom: 3rem;
}

.comparison-section {
    background: rgba(255,255,255,0.9);
    border-radius: 20px;
    padding: 3rem;
    margin: 2rem 0;
    box-shadow: 0 8px 25px rgba(0,0,0,0.05);
}

.bloom-card {
    padding: 1rem;
    margin: 1rem 0;
    background: white;
    border-radius: 20px;
    box-shadow: 0 6px 20px rgba(0,0,0,0.05);
    transition: all 0.3s ease;
}

.bloom-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 10px 30px rgba(0,0,0,0.1);
}

.feature-card {
    padding: 0.5rem;
    background: white;
    border-radius: 20px;
    height: 100%;
    box-shadow: 0 6px 20px rgba(0,0,0,0.05);
    transition: all 0.3s ease;
}

.feature-card:hover {
    transform: translateY(-5px);
    box-shadow: 0 10px 30px rgba(0,0,0,0.1);
}

.footer {
    background: var(--dark);
    color: white;
    padding: 2rem;
    margin-top: 4rem;
    margin-bottom: 0;
    border-radius: 20px 20px 0 0;
}

.problem-column {
    padding: 1.5rem;
    border-right: 2px solid var(--light);
}

.solution-column {
    padding: 1.5rem;
}

@media (max-width: 768px) {
    .problem-column {
        border-right: none;
        border-bottom: 2px solid var(--light);
        padding-bottom: 2rem;
        margin-bottom: 2rem;
    }
}
</style>
""", unsafe_allow_html=True)

# App Header
st.markdown("""
<div class="header-container">
    <div class="main-title">🌿 Bloomsphere</div>
    <div class="tagline">Next-Gen Cognitive Learning Platform</div>
</div>
""", unsafe_allow_html=True)

# Educational Landscape Comparison
with st.container():
    # NOTE(review): this second <style> block overrides padding/background of
    # .problem-column/.solution-column declared in the global CSS above.
    st.markdown("""
    <style>
    .comparison-section {
        text-align: center;
        margin-bottom: 2rem;
    }
    .problem-column, .solution-column {
        padding: 1rem;
        border-radius: 8px;
    }
    .problem-column {
        background-color: #ffe5e5;
    }
    .solution-column {
        background-color: #e5f5e5;
    }
    h3 {
        margin-bottom: 1.5rem;
        text-align: center;
    }
    .problem-column p, .solution-column p {
        color: #444;
        line-height: 1.8;
    }
    </style>

    <div class="comparison-section">
        <h2 style="color: #333;">Transforming Educational Assessment</h2>
    </div>
    """, unsafe_allow_html=True)

    col1, col2 = st.columns(2)

    with col1:
        st.markdown("""
        <div class="problem-column">
            <h3 style="color: #d32f2f;">❌ Current Challenges</h3>
            <p>• One-size-fits-all assessments</p>
            <p>• Subjective difficulty evaluation</p>
            <p>• Limited cognitive level tracking</p>
            <p>• Manual paper creation processes</p>
            <p>• No standardized quality metrics</p>
        </div>
        """, unsafe_allow_html=True)

    with col2:
        st.markdown("""
        <div class="solution-column">
            <h3 style="color: #007BFF;">✅ Bloomsphere Solution</h3>
            <p>• AI-powered cognitive alignment</p>
            <p>• Real-time Bloom's Taxonomy analysis</p>
            <p>• Detailed skill matrix visualization</p>
            <p>• Automated quality assurance</p>
            <p>• Data-driven insights & reporting</p>
        </div>
        """, unsafe_allow_html=True)

# Bloom's Taxonomy Section — six cards, two per column (3 columns).
st.markdown("""
<h1 style="color: var(--dark); text-align: center; margin: 4rem 0 2rem 0;">
    🧠 Powered by Bloom's Taxonomy Framework
</h1>
""", unsafe_allow_html=True)

cols = st.columns(3, gap="large")
with cols[0]:
    st.markdown("""
    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">📚 Remember</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Foundation of knowledge retention and recall</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Fact retention</div>
            <div style="margin-bottom: 0.5rem;">• Concept identification</div>
            <div>• Basic comprehension</div>
        </div>
    </div>

    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">🔍 Analyze</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Critical examination of information structures</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Pattern recognition</div>
            <div style="margin-bottom: 0.5rem;">• Relationship mapping</div>
            <div>• Error detection</div>
        </div>
    </div>
    """, unsafe_allow_html=True)

with cols[1]:
    st.markdown("""
    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">💡 Understand</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Meaning construction and interpretation</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Concept explanation</div>
            <div style="margin-bottom: 0.5rem;">• Information categorization</div>
            <div>• Knowledge translation</div>
        </div>
    </div>

    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">📈 Evaluate</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Strategic judgment and decision making</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Quality assessment</div>
            <div style="margin-bottom: 0.5rem;">• Argument validation</div>
            <div>• Solution critique</div>
        </div>
    </div>
    """, unsafe_allow_html=True)

with cols[2]:
    st.markdown("""
    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">🛠️ Apply</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Practical implementation of knowledge</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Problem solving</div>
            <div style="margin-bottom: 0.5rem;">• Procedure execution</div>
            <div>• Scenario simulation</div>
        </div>
    </div>

    <div class="bloom-card">
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.3rem;">🚀 Create</h3>
        <p style="color: #666; font-size: 0.95rem; margin-bottom: 1rem;">Original synthesis and innovation</p>
        <div style="color: #666; font-size: 0.9rem; line-height: 1.6;">
            <div style="margin-bottom: 0.5rem;">• Conceptual design</div>
            <div style="margin-bottom: 0.5rem;">• Hypothesis formulation</div>
            <div>• System development</div>
        </div>
    </div>
    """, unsafe_allow_html=True)

# Features Section
st.markdown("""
<h1 style="color: var(--dark); text-align: center; margin: 4rem 0 3rem 0;">
    ✨ Key Features
</h1>
""", unsafe_allow_html=True)

# Feature Cards
feat_col1, feat_col2, feat_col3 = st.columns(3, gap="large")

with feat_col1:
    st.markdown("""
    <div class="feature-card">
        <div style="font-size: 2rem; margin-bottom: 1rem; color: var(--primary);">📝</div>
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.2rem;">
            Smart Paper Generation
        </h3>
        <p style="color: #666; font-size: 0.95rem; line-height: 1.6;display:flex; text-align: center;">
            Create Bloom's-aligned question papers with defined difficulty balancing
            and cognitive levels.
        </p>
    </div>
    """, unsafe_allow_html=True)

with feat_col2:
    st.markdown("""
    <div class="feature-card">
        <div style="font-size: 2rem; margin-bottom: 1rem; color: var(--primary);">🔍</div>
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.2rem;">
            Cognitive Analysis
        </h3>
        <p style="color: #666; font-size: 0.95rem; line-height: 1.6; display:flex; text-align: center;">
            Get instant Bloom's Taxonomy breakdowns with complexity scores.
        </p>
    </div>
    """, unsafe_allow_html=True)

with feat_col3:
    st.markdown("""
    <div class="feature-card">
        <div style="font-size: 2rem; margin-bottom: 1rem; color: var(--primary);">📊</div>
        <h3 style="color: var(--primary); margin-bottom: 1rem; font-size: 1.2rem;">
            Learning Analytics
        </h3>
        <p style="color: #666; font-size: 0.95rem; line-height: 1.6; display:flex; text-align: center;">
            Track performance by comparing answers with standard rubrics.
        </p>
    </div>
    """, unsafe_allow_html=True)

# Action Buttons — each navigates to a sub-page under pages/.
st.markdown("<div style='height: 2rem'></div>", unsafe_allow_html=True)
btn_col1, btn_col2, btn_col3 = st.columns(3, gap="large")

with btn_col1:
    if st.button("Try Question Generator →", key="generator",
                 help="Create customized question papers using AI"):
        st.switch_page("pages/question_paper.py")

with btn_col2:
    if st.button("Analyze Papers Now →", key="analyzer",
                 help="Get detailed paper quality analysis"):
        st.switch_page("pages/judge_paper.py")

with btn_col3:
    if st.button("View Analytics →", key="analytics",
                 help="Explore learning analytics dashboard"):
        st.switch_page("pages/learning_analytics.py")

# Footer
st.markdown("""
<div class='footer'>
    <div style='text-align: center;'>
        <h3 style='color: white; margin-bottom: 1rem;'>🌐 Bloomsphere Ecosystem</h3>
        <p style='color: var(--light); margin-bottom: 0.5rem;'>© 2025 Bloomsphere</p>
        <small style='color: var(--accent);'>Cultivating Growth Through Cognitive Science</small>
    </div>
</div>
""", unsafe_allow_html=True)
helper.py ADDED
@@ -0,0 +1,371 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import pdfplumber
2
+ from langchain.text_splitter import CharacterTextSplitter
3
+ from openai import OpenAI
4
+ import json
5
+ import numpy as np
6
+ import time
7
+ import re
8
+ from sklearn.feature_extraction.text import TfidfVectorizer
9
+ from sklearn.metrics.pairwise import cosine_similarity
10
+ from dotenv import load_dotenv
11
+ import os
12
+ load_dotenv()
13
+
14
def process_file(filepath):
    """Extract text from a PDF and split it into character chunks.

    Args:
        filepath: Path to the PDF file to process.

    Returns:
        list[str]: Chunks of extracted text (up to 50,000 characters each,
        10-character overlap). Empty list if the PDF has no extractable text.
    """
    content = []
    with pdfplumber.open(filepath) as pdf:
        for page in pdf.pages:
            text = page.extract_text()
            if text:  # pages without a text layer return None
                content.append(text)

    # Join page texts with a blank line so page boundaries stay visible.
    full_text = "\n\n".join(content)

    text_splitter = CharacterTextSplitter(
        chunk_size=50000,
        chunk_overlap=10
    )
    chunks = text_splitter.split_text(full_text)

    # The original version computed a TF-IDF cosine-similarity ranking of the
    # chunks against an EMPTY query and then discarded the result, returning
    # all chunks unconditionally (and crashing on empty PDFs with an
    # empty-vocabulary error). The dead ranking code has been removed; the
    # returned value is unchanged.
    return chunks
50
+
51
+
52
def givemcqquestion(chunks, create, evaluate, analyze, apply, understand, remember, level, questions):
    """Generate Bloom's-Taxonomy-aligned MCQs via the configured LLM endpoint.

    Args:
        chunks: Context text (learning material) to base the question on.
        create, evaluate, analyze, apply, understand, remember: Bloom's level
            weights (fractions; rendered as percentages in the prompt).
        level: Difficulty — 1 (easy), 2 (medium) or 3 (hard).
        questions: Number of MCQs to request from the model.

    Returns:
        tuple: (parsed MCQ dict from the model's JSON, prompt string used).

    Raises:
        ValueError: If ``level`` is not 1, 2 or 3.
        json.JSONDecodeError: If the model's reply is not valid JSON.
    """
    if level == 1:
        game = "easy and non-tricky with simple options"
    elif level == 2:
        game = "tricky and medium-level lengthy questions"
    elif level == 3:
        game = "hard and tricky and lengthy questions"
    else:
        raise ValueError("Invalid level. Choose 1 (easy), 2 (medium), or 3 (hard).")

    prompt = f"""You are an AI designed to generate high-quality {game} level multiple-choice questions (MCQs) for educational assessments, following Bloom’s Taxonomy. Your task is to create a well-structured question that aligns with the given cognitive level, ensuring it accurately reflects the required depth of understanding.

Instructions:
For each Bloom’s Taxonomy level, follow the guidelines below to ensure appropriate question complexity:
- **Knowledge (Remembering)**: Formulate a factual or recall-based question.
- **Comprehension (Understanding)**: Create a question that requires explanation or interpretation.
- **Application (Applying)**: Develop a question that applies knowledge to a new situation.
- **Analysis (Analyzing)**: Design a question that encourages breaking down concepts.
- **Synthesis (Creating)**: Construct a question requiring idea combination or new approaches.
- **Evaluation (Evaluating)**: Generate a question that involves judgment or assessment.

STRICT RULES:
- Generate exactly **{questions} MCQ** based on the given context and Bloom’s Taxonomy level.
- Return the response as a **structured JSON object** without any additional text.
- The question should reflect the complexity required for the given cognitive level.
- Options should be **plausible, with only one correct answer** clearly identifiable.
- Ensure a structured rubric to evaluate student responses.

Input Parameters:
- **Context**: {chunks} (Relevant learning material)
- **Bloom’s Taxonomy Distribution**:
- Understanding: {understand*100}%
- Analysis: {analyze*100}%
- Evaluation: {evaluate*100}%
- Synthesis: {create*100}%
- Application: {apply*100}%
- Knowledge: {remember*100}%

Expected JSON Output Format:

{{
"question": "<Your MCQ Question>",
"options": {{
"A": "<Option A>",
"B": "<Option B>",
"C": "<Option C>",
"D": "<Option D>"
}},
"correct_answer": "<Correct Option>",
"rubric": {{
"key_concept_assessed": "<Briefly explain what is being tested>",
"criteria_for_correct_answer": "<Explain why the correct answer is correct>",
"common_misconceptions": "<List potential incorrect assumptions>",
"cognitive_skill_tested": "<Describe how it aligns with Bloom’s Taxonomy>"
}}
}}

"""
    # SECURITY FIX: the original printed API_KEY / base URL / model name to
    # stdout, leaking credentials into logs. Never log secrets.
    client = OpenAI(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("GENERATOR_BASE_URL")
    )

    response = client.chat.completions.create(
        model=os.getenv("MODEL_NAME"),
        messages=[{"role": "user", "content": prompt}]
    )

    mcq = response.choices[0].message.content
    # Strip a markdown ```json fence if the model wrapped its output in one;
    # json.loads tolerates the embedded newlines, so no further cleanup needed.
    fence = re.search(r"```json(.*?)```", mcq, re.DOTALL)
    if fence:
        mcq = fence.group(1)
    mcq = json.loads(mcq)
    return mcq, prompt
130
+
131
+
132
def givetruefalsequestion(chunks, create, evaluate, analyze, apply, understand, remember, level, questions):
    """Generate Bloom's-aligned True/False question(s) from the given context.

    Returns the parsed JSON object produced by the model. Raises ValueError
    for an unknown difficulty level.
    """
    # Map the numeric difficulty to the wording used inside the prompt.
    difficulty_by_level = {
        1: "easy and straightforward statements",
        2: "moderate complexity with slight trickiness",
        3: "complex and tricky statements requiring deep understanding",
    }
    if level not in difficulty_by_level:
        raise ValueError("Invalid level. Choose 1 (easy), 2 (medium), or 3 (hard).")
    game = difficulty_by_level[level]

    prompt = f"""You are an AI designed to generate high-quality {game} level **True/False** questions for educational assessments, following Bloom’s Taxonomy. Your task is to create a well-structured **True/False** question that aligns with the given cognitive level, ensuring it accurately reflects the required depth of understanding.

### **Instructions:**
For each Bloom’s Taxonomy level, follow the guidelines below to ensure appropriate question complexity:
- **Knowledge (Remembering)**: Generate a straightforward fact-based statement.
- **Comprehension (Understanding)**: Formulate a statement that requires explanation or interpretation.
- **Application (Applying)**: Develop a statement that applies knowledge to a new situation.
- **Analysis (Analyzing)**: Design a statement that involves breaking down concepts.
- **Synthesis (Creating)**: Construct a statement requiring combining ideas or new approaches.
- **Evaluation (Evaluating)**: Generate a statement requiring judgment or assessment.

### **STRICT RULES:**
- Generate exactly **{questions}** True/False question.
- Return the response as a **structured JSON object** without any additional text.
- The question should reflect the complexity required for the given cognitive level.
- Ensure a structured rubric to evaluate student responses.

### **Input Parameters:**
- **Context**: {chunks} (Relevant learning material)
- **Bloom’s Taxonomy Distribution**:
- Understanding: {understand*100}%
- Analysis: {analyze*100}%
- Evaluation: {evaluate*100}%
- Synthesis: {create*100}%
- Application: {apply*100}%
- Knowledge: {remember*100}%

### **Expected JSON Output Format:**
```json
{{
"statement": "<Your True/False Statement>",
"correct_answer": "<True or False>",
"rubric": {{
"key_concept_assessed": "<Briefly explain what is being tested>",
"criteria_for_correct_answer": "<Explain why the correct answer is correct>",
"common_misconceptions": "<List potential incorrect assumptions>",
"cognitive_skill_tested": "<Describe how it aligns with Bloom’s Taxonomy>"
}}
}}
"""
    llm = OpenAI(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("GENERATOR_BASE_URL")
    )

    completion = llm.chat.completions.create(
        model=os.getenv("MODEL_NAME"),
        messages=[{"role": "user", "content": prompt}]
    )

    # Drop any markdown fences, flatten newlines, then parse the JSON payload.
    raw = completion.choices[0].message.content
    if "```json" in raw:
        for marker in ("```json", "```"):
            raw = raw.replace(marker, "")
    return json.loads(raw.replace("\n", ""))
198
+
199
+
200
+
201
+
202
def giveopenquestion(chunks, create, evaluate, analyze, apply, understand, remember, level, questions):
    """Generate open-ended question(s) aligned to a Bloom's Taxonomy distribution.

    Args:
        chunks: Context text (learning material) for the questions.
        create, evaluate, analyze, apply, understand, remember: Bloom's level
            weights; each must lie in [0, 1].
        level: Difficulty — 1 (easy), 2 (medium) or 3 (hard).
        questions: Number of open-ended questions to request.

    Returns:
        dict: Parsed model output with 'metadata' and 'questions' keys.

    Raises:
        ValueError: On out-of-range weights, bad level, or a response missing
            the required top-level fields.
        json.JSONDecodeError: If the model's reply cannot be parsed as JSON.
    """
    # Validate input parameters
    bloom_params = {
        'create': create,
        'evaluate': evaluate,
        'analyze': analyze,
        'apply': apply,
        'understand': understand,
        'remember': remember
    }

    if not all(0 <= val <= 1 for val in bloom_params.values()):
        raise ValueError("All Bloom's parameters must be between 0 and 1")

    if level not in [1, 2, 3]:
        raise ValueError("Invalid level. Choose 1 (easy), 2 (medium), or 3 (hard).")

    # Complexity description
    complexity_levels = {
        1: "simple recall-based questions",
        2: "moderate explanation questions",
        3: "complex analytical questions"
    }
    complexity = complexity_levels.get(level)

    prompt = f"""Generate {questions} open-ended question(s) based on the provided context, strictly following these requirements:

### CONTEXT:
{chunks}

### BLOOM'S TAXONOMY DISTRIBUTION:
- Creating: {create*100}%
- Evaluating: {evaluate*100}%
- Analyzing: {analyze*100}%
- Applying: {apply*100}%
- Understanding: {understand*100}%
- Remembering: {remember*100}%

### COGNITIVE LEVEL:
{complexity} (Level {level})

### OUTPUT REQUIREMENTS:
- Return ONLY valid JSON format
- Include detailed rubric with cognitive skill mapping
- For each question, specify which Bloom's level it primarily targets

### RESPONSE FORMAT:
```json
{{
"metadata": {{
"blooms_distribution": {{
"create": {create},
"evaluate": {evaluate},
"analyze": {analyze},
"apply": {apply},
"understand": {understand},
"remember": {remember}
}},
"complexity_level": {level}
}},
"questions": [
{{
"question": "Question text",
"primary_blooms_level": "create|evaluate|analyze|apply|understand|remember",
"rubric": {{
"key_concept": "...",
"criteria": "...",
"misconceptions": "...",
"cognitive_skills": {{
"primary": "...",
"secondary": ["...", "..."]
}}
}}
}}
]
}}
```
"""

    client = OpenAI(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("GENERATOR_BASE_URL")
    )

    try:
        response = client.chat.completions.create(
            model=os.getenv("MODEL_NAME"),
            messages=[{"role": "user", "content": prompt}],
            temperature=0.7,
            max_tokens=1200
        )
        raw_response = response.choices[0].message.content

        # Extract JSON from response: prefer the fenced ```json block if the
        # model included one, otherwise parse the whole reply.
        json_match = re.search(r'```json(.*?)```', raw_response, re.DOTALL)
        json_str = json_match.group(1).strip() if json_match else raw_response.strip()

        # Parse and validate JSON
        result = json.loads(json_str)

        # Validate structure
        if not all(key in result for key in ['metadata', 'questions']):
            raise ValueError("Response missing required fields")

        return result

    except json.JSONDecodeError as e:
        # raw_response is always bound here: decoding only happens after it
        # is assigned above.
        print(f"JSON Decode Error: {e}")
        print(f"Problematic response:\n{raw_response}")
        raise
    except Exception as e:
        # NOTE(review): this also catches the ValueError raised just above
        # and relabels it "API Error" before re-raising — confirm intended.
        print(f"API Error: {e}")
        raise
315
+
316
+
317
+
318
def generate_questions_from_file(filepath, mcq, tf, qna, create, evaluate, analyze, apply, understand, remember, level):
    """Generate a mix of MCQ, True/False and open questions from a PDF file.

    Args:
        filepath: Path of the source PDF (passed to ``process_file``).
        mcq, tf, qna: How many questions of each type to generate.
        create, evaluate, analyze, apply, understand, remember: Bloom's
            weights forwarded to each generator.
        level: Difficulty (1-3) forwarded to each generator.

    Returns:
        dict: Keys 'mcq', 'tf', 'qna'; each value is a list of generated
        questions, with ``{"error": ...}`` placeholders for failures.
    """
    # Process file first
    chunks = process_file(filepath)

    MAX_RETRIES = 3
    RETRY_DELAY = 1  # seconds to wait between retry attempts

    def get_random_chunk():
        # Random chunk for variety; empty string if the PDF yielded no text.
        return chunks[np.random.randint(len(chunks))] if chunks else ""

    def generate_questions(q_type, count, generator):
        # Generate `count` questions with `generator`, retrying each one up
        # to MAX_RETRIES times before recording an error placeholder.
        results = []
        for _ in range(count):
            for attempt in range(MAX_RETRIES):
                try:
                    chunk = get_random_chunk()
                    question = generator(chunk, create, evaluate, analyze, apply,
                                         understand, remember, level, questions=1)
                    results.append(question)
                    break
                except Exception as e:
                    print(f"Error generating {q_type} question (attempt {attempt+1}): {str(e)}")
                    if attempt == MAX_RETRIES - 1:
                        results.append({"error": f"Failed to generate {q_type} question"})
                    else:
                        # FIX: only back off when another attempt follows; the
                        # original also slept after the final failure.
                        time.sleep(RETRY_DELAY)
        return results

    return {
        'mcq': generate_questions("MCQ", mcq, givemcqquestion),
        'tf': generate_questions("True/False", tf, givetruefalsequestion),
        'qna': generate_questions("Q&A", qna, giveopenquestion)
    }
352
+
353
if __name__ == "__main__":
    # Smoke-test run: one question of each type from a sample PDF, with a
    # uniform Bloom's distribution at the easiest difficulty.
    filepath = "data/eco.pdf"
    counts = {"mcq": 1, "tf": 1, "qna": 1}
    weights = {
        "create": 0.2,
        "evaluate": 0.2,
        "analyze": 0.2,
        "apply": 0.2,
        "understand": 0.2,
        "remember": 0.2,
    }
    level = 1

    questions = generate_questions_from_file(
        filepath,
        counts["mcq"],
        counts["tf"],
        counts["qna"],
        weights["create"],
        weights["evaluate"],
        weights["analyze"],
        weights["apply"],
        weights["understand"],
        weights["remember"],
        level,
    )
    print(questions)
helper2.py ADDED
@@ -0,0 +1,122 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import google.generativeai as genai
2
+ from PIL import Image
3
+ from dotenv import load_dotenv
4
+ import os
5
+ load_dotenv()
6
+
7
# SECURITY FIX: the original committed a literal Google API key here. Never
# hard-code credentials in source — the leaked key must be revoked. Read the
# key from the environment (populated by load_dotenv() above) instead.
genai.configure(api_key=os.getenv("GOOGLE_API_KEY"))
8
def extract_questions_from_image(image_path):
    """Ask Gemini to list every question visible in the image at *image_path*.

    Returns the model's stripped text reply ('None' when no questions found).
    """
    instruction = "".join([
        "Analyze the provided image and extract all the questions present in it. ",
        "Return only the extracted questions in a structured format, as follows:\n\n",
        "**Output Format:**\n",
        "Q1: [First question]\n",
        "Q2: [Second question]\n",
        "Q3: [Third question]\n",
        "... \n\n",
        "If there are no questions in the image, return 'None' without any additional text.",
    ])
    gemini = genai.GenerativeModel("gemini-1.5-flash")
    img = Image.open(image_path)
    reply = gemini.generate_content([img, instruction])
    return reply.text.strip()
23
+
24
+ from PyPDF2 import PdfReader
25
+ import google.generativeai as genai
26
+
27
def extract_questions_from_pdf(pdf_path):
    """Ask Gemini to list every question found in the PDF's text.

    Args:
        pdf_path: Path to the PDF file to analyze.

    Returns:
        str: The model's stripped reply ('None' when no questions found).
    """
    # BUG FIX: the original ignored pdf_path and always opened the
    # hard-coded file "sst.pdf".
    pdf_reader = PdfReader(pdf_path)
    # Keep only pages with an extractable text layer.
    text = "\n".join([page.extract_text() for page in pdf_reader.pages if page.extract_text()])

    # Initialize the AI model
    model = genai.GenerativeModel("gemini-1.5-flash")
    prompt = (
        "Analyze the provided text and extract all the questions present in it. "
        "Return only the extracted questions in a structured format, as follows:\n\n"
        "**Output Format:**\n"
        "Q1: [First question]\n"
        "Q2: [Second question]\n"
        "Q3: [Third question]\n"
        "... \n\n"
        "If there are no questions in the text, return 'None' without any additional text."
    )

    # Generate response
    response = model.generate_content([text, prompt])
    return response.text.strip()
48
+
49
# ---------------------------------------------------------------------------
# Module-level demo script: extracts questions from a sample file, then asks
# the OpenAI-compatible endpoint to score each one against Bloom's Taxonomy.
# NOTE(review): this runs on import of helper2 — confirm that is intended.
# ---------------------------------------------------------------------------
file_path = "data/sst.pdf"  # Change this to your file path
if file_path.lower().endswith((".png", ".jpg", ".jpeg")):
    result = extract_questions_from_image(file_path)
elif file_path.lower().endswith(".pdf"):
    result = extract_questions_from_pdf(file_path)
else:
    result = "Unsupported file format. Please use PNG, JPG, or PDF."
# Print the extracted questions
# NOTE(review): when extraction returns 'None' or the unsupported-format
# message, those strings are still split and sent to the classifier below.
qw = result.split("\n")

from openai import OpenAI
client = OpenAI(
    api_key=os.getenv('API_KEY'),
    base_url=os.getenv('GENERATOR_BASE_URL'),
)
# Classification prompt template; {{ }} escapes survive str.format so only
# {input_sentence} is substituted per question.
prompt = """
Analyze the following sentence and classify it according to Bloom's Taxonomy levels.
Return the results as a probability distribution where the sum of all 6 levels equals 1.

Bloom's Taxonomy Levels:
1. Remembering: Recall facts and basic concepts
2. Understanding: Explain ideas or concepts
3. Applying: Use information in new situations
4. Analyzing: Draw connections among ideas
5. Evaluating: Justify a stand or decision
6. Creating: Produce new or original work

For the given sentence, provide your assessment in JSON format with the following structure:
{{
"remembering": float,
"understanding": float,
"applying": float,
"analyzing": float,
"evaluating": float,
"creating": float
}}

Ensure that:
- Each value is between 0 and 1
- The sum of all six values equals exactly 1
- The distribution reflects the cognitive level required by the sentence

Sentence to analyze: "{input_sentence}"

Return only the JSON output without any additional explanation or commentary.
"""

import json

# iterate through q1 and pass it to get response
for i in qw:
    formatted_prompt = prompt.format(input_sentence=i)
    response = client.chat.completions.create(
        model=os.getenv("MODEL_NAME"),
        messages=[{"role": "user", "content": formatted_prompt}]
    )

    # Extract the content from the response
    response_content = response.choices[0].message.content

    # Clean the response (remove markdown code blocks if present)
    json_str = response_content.strip().replace('```json', '').replace('```', '').strip()

    try:
        # Parse the JSON string
        bloom_scores = json.loads(json_str)

        print("\n")
        print("sentence:", i)
        print("Bloom Score:")
        print(json.dumps(bloom_scores, indent=4))  # Pretty print the JSON
    except json.JSONDecodeError as e:
        print(f"Error parsing JSON: {e}")
        print("Raw response:", response_content)
pages/judge_paper.py ADDED
@@ -0,0 +1,317 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import os
3
+ from dotenv import load_dotenv
4
+ import random
5
+ import google.generativeai as genai
6
+ from PIL import Image
7
+ from PyPDF2 import PdfReader
8
+ from openai import OpenAI
9
+ import json
10
+ import tempfile
11
+ # Load environment variables
12
+ load_dotenv()
13
+
14
# SECURITY: never commit API keys to source control — read the Gemini key from
# the environment (.env is loaded above). Any previously hard-coded key should
# be considered compromised and rotated.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
15
+
16
+ # Page Configuration
17
+ st.set_page_config(page_title="Paper Scoring", page_icon="📄", layout="wide")
18
+
19
+ st.markdown("""
20
+ <style>
21
+ .stMainBlockContainer {
22
+ padding: 20px 50px;
23
+ }
24
+ .stAppHeader {
25
+ position: relative;
26
+ height: 0;
27
+ }
28
+ </style>
29
+ """, unsafe_allow_html=True)
30
+
31
+ # Custom CSS for Styling
32
+ st.markdown("""
33
+ <style>
34
+ .upload-container {
35
+ background-color: #f0f2f6;
36
+ border-radius: 10px;
37
+ padding: 20px;
38
+ border: 2px dashed #4a6cf7;
39
+ text-align: center;
40
+ }
41
+
42
+ .score-breakdown {
43
+ background-color: #f8f9fa;
44
+ border-radius: 8px;
45
+ padding: 15px;
46
+ margin-bottom: 15px;
47
+ }
48
+ .score-header {
49
+ font-weight: bold;
50
+ color: #4a6cf7;
51
+ margin-bottom: 10px;
52
+ }
53
+ .stProgress > div > div > div {
54
+ background-color: #4a6cf7;
55
+ }
56
+ </style>
57
+ """, unsafe_allow_html=True)
58
+
59
+ # Back Button
60
+ if st.button("← Back to Dashboard", key="back_btn"):
61
+ st.switch_page("app.py")
62
+
63
def parse_questions_from_response(response_text):
    """Parse 'Q#: text' lines from an LLM response into a list of questions.

    Args:
        response_text: Raw model output; 'None' (or empty) means no questions.

    Returns:
        list[str]: One entry per non-blank line, with any 'Q#:' prefix removed.
    """
    if not response_text or response_text.strip() == 'None':
        return []

    parsed = []
    for raw_line in response_text.strip().split('\n'):
        entry = raw_line.strip()
        if not entry:
            continue
        # Drop the "Q#:" prefix when present; otherwise keep the line verbatim.
        if ':' in entry:
            parsed.append(entry.split(':', 1)[1].strip())
        else:
            parsed.append(entry)
    return parsed
78
+
79
def extract_questions_from_image(image_path):
    """Extract exam questions from an image using Gemini vision.

    Args:
        image_path: Filesystem path to a PNG/JPEG image.

    Returns:
        list[str]: Question texts parsed from the model's "Q#: ..." reply
        (empty when the model answers 'None').
    """
    image = Image.open(image_path)
    model = genai.GenerativeModel("gemini-1.5-flash")
    # The prompt pins an exact "Q#: ..." output format so parsing stays trivial.
    prompt = (
        "Extract all questions from this image and format them as a numbered list. "
        "Use this exact format:\n"
        "Q1: [question text]\n"
        "Q2: [question text]\n"
        "...\n"
        "Only include actual questions. Return 'None' if no questions are found.\n"
        "Do not include any other text or explanations."
    )
    response = model.generate_content([image, prompt])
    return parse_questions_from_response(response.text.strip())
93
+
94
def extract_questions_from_pdf(pdf_path):
    """Extract exam questions from a PDF by reading its text and asking Gemini.

    Args:
        pdf_path: Filesystem path to a PDF file.

    Returns:
        list[str]: Question texts parsed from the model's "Q#: ..." reply
        (empty when the model answers 'None').
    """
    pdf_reader = PdfReader(pdf_path)
    # Concatenate text from all pages; pages with no extractable text are skipped.
    text = "\n".join([page.extract_text() for page in pdf_reader.pages if page.extract_text()])

    model = genai.GenerativeModel("gemini-1.5-flash")
    # Same fixed "Q#: ..." format as the image path, so both share one parser.
    prompt = (
        "Extract all questions from this text and format them as a numbered list. "
        "Use this exact format:\n"
        "Q1: [question text]\n"
        "Q2: [question text]\n"
        "...\n"
        "Only include actual questions. Return 'None' if no questions are found.\n"
        "Do not include any other text or explanations."
    )

    response = model.generate_content([text, prompt])
    return parse_questions_from_response(response.text.strip())
111
+
112
def parse_bloom_scores(response_content):
    """Parse and validate Bloom's taxonomy scores from an LLM reply.

    Args:
        response_content: Raw model output, possibly wrapped in ```json fences.

    Returns:
        dict | None: {'Remembering': float, ...} with title-cased keys and
        values re-normalized to sum to 1, or None when parsing/validation fails
        (an error is surfaced in the Streamlit UI in that case).
    """
    try:
        # Strip markdown code fences the model sometimes adds around the JSON.
        payload = response_content.strip().replace('```json', '').replace('```', '').strip()
        raw_scores = json.loads(payload)

        # Title-case the keys so they line up with the display labels.
        scores = {key.title(): val for key, val in raw_scores.items()}

        expected_levels = ['Remembering', 'Understanding', 'Applying',
                           'Analyzing', 'Evaluating', 'Creating']
        if any(level not in scores for level in expected_levels):
            raise ValueError("Missing required taxonomy levels")

        # Each score must be a number in [0, 1].
        for level, val in scores.items():
            if not isinstance(val, (int, float)) or not 0 <= val <= 1:
                raise ValueError(f"Invalid score value for {level}: {val}")

        # Re-normalize so the six probabilities sum to exactly 1
        # (guarding against division by zero on an all-zero reply).
        total = sum(scores.values())
        if total != 0:
            scores = {level: val / total for level, val in scores.items()}

        return scores
    except (json.JSONDecodeError, ValueError) as e:
        st.error(f"Error parsing Bloom's taxonomy scores: {str(e)}")
        return None
144
+
145
def get_bloom_scores(question):
    """Classify one question across the six Bloom's taxonomy levels via the LLM.

    Args:
        question: The question text to classify.

    Returns:
        dict | None: Normalized {level: probability} mapping from
        parse_bloom_scores(), or None when the model reply cannot be parsed.
    """
    # OpenAI-compatible client pointed at the configured generator endpoint.
    client = OpenAI(
        api_key=os.getenv("API_KEY"),
        base_url=os.getenv("GENERATOR_BASE_URL"),

    )

    prompt = """
    Analyze the following sentence and classify it according to Bloom's Taxonomy levels.
    Return the results as a probability distribution where the sum of all 6 levels equals 1.

    Bloom's Taxonomy Levels:
    1. Remembering: Recall facts and basic concepts
    2. Understanding: Explain ideas or concepts
    3. Applying: Use information in new situations
    4. Analyzing: Draw connections among ideas
    5. Evaluating: Justify a stand or decision
    6. Creating: Produce new or original work

    For the given sentence, provide your assessment ONLY as a JSON object with this exact structure, no other text:
    {
        "remembering": float,
        "understanding": float,
        "applying": float,
        "analyzing": float,
        "evaluating": float,
        "creating": float
    }
    """

    formatted_prompt = prompt + f'\nSentence to analyze: "{question}"'

    # temperature=0.0 keeps the classification as deterministic as possible.
    response = client.chat.completions.create(
        model=os.getenv("MODEL_NAME"),
        messages=[{"role": "user", "content": formatted_prompt}], temperature=0.0
    )

    return parse_bloom_scores(response.choices[0].message.content.strip())
183
+
184
def process_content(uploaded_file=None, text_content=None):
    """Extract questions from an upload (or raw text) and score each one.

    Args:
        uploaded_file: Streamlit UploadedFile (PDF or image), or None.
        text_content: Raw question text, used only when no file is given.

    Returns:
        list[dict]: [{'question': str, 'score': dict}, ...] for every question
        whose Bloom scores were successfully parsed.
    """
    questions = []
    if uploaded_file:
        # Persist the upload to a real temp file so PIL/PyPDF2 can open a path.
        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(uploaded_file.name)[1]) as tmp_file:
            tmp_file.write(uploaded_file.getvalue())
            tmp_path = tmp_file.name

        # Route by file extension; unsupported extensions leave questions empty.
        if uploaded_file.name.lower().endswith((".png", ".jpg", ".jpeg")):
            questions = extract_questions_from_image(tmp_path)
        elif uploaded_file.name.lower().endswith(".pdf"):
            questions = extract_questions_from_pdf(tmp_path)

        os.unlink(tmp_path)  # remove the temp copy once extraction is done
    elif text_content:
        # Free-text input is treated as a single question.
        questions = [text_content]

    results = []
    for question in questions:
        if question.strip():
            scores = get_bloom_scores(question)
            if scores:  # Only add if scores were successfully parsed
                results.append({
                    'question': question,
                    'score': scores
                })

    return results
211
+
212
def main():
    """Render the paper-scoring page: upload/text form, then scored results."""
    st.markdown("<h1 style='font-size: 28px;'>📄 Academic Paper Scoring System</h1>", unsafe_allow_html=True)
    st.markdown("Evaluate the Quality of Your Academic Paper")

    # Initialize session state so the text area survives reruns.
    if 'question_typed' not in st.session_state:
        st.session_state.question_typed = ""

    with st.container():
        # A form batches the inputs so the page only reruns on submit.
        with st.form(key='paper_scorer_form'):
            st.header("Upload Academic Paper")

            # File uploader (PDF or image of the paper).
            uploaded_file = st.file_uploader(
                "Choose a PDF file",
                type=['pdf', 'jpg', 'png', 'jpeg'],
                label_visibility="collapsed"
            )

            st.markdown("<div style='text-align: center; margin: 20px 0;'><strong>OR</strong></div>",
                        unsafe_allow_html=True)

            # Free-text alternative to uploading a file.
            st.session_state.question_typed = st.text_area(
                "Paste your question here",
                value=st.session_state.question_typed,
                height=150
            )

            # Submit button
            submitted = st.form_submit_button(
                "Score Paper",
                use_container_width=True,
                type="primary"
            )

    # Processing and results display — the uploaded file takes priority over
    # the typed text inside process_content().
    if submitted:
        if uploaded_file or st.session_state.question_typed:
            with st.spinner("Analyzing content..."):
                results = process_content(uploaded_file, st.session_state.question_typed)
                if results:
                    st.success("Analysis complete!")
                    display_results(results)
                else:
                    st.error("Failed to process the document. Please try again.")
        else:
            st.warning("Please upload a file or enter text to score")
260
+
261
def display_results(results):
    """Render averaged and per-question Bloom's taxonomy scores.

    Args:
        results: Non-empty list of {'question': str, 'score': {level: float}}
            dicts as produced by process_content(). Caller guarantees at least
            one entry (len(results) is used as a divisor below).
    """
    # Sum each taxonomy level's score across all questions.
    total_score = {category: 0 for category in ['Remembering', 'Understanding', 'Applying',
                                                'Analyzing', 'Evaluating', 'Creating']}
    for item in results:
        for category in total_score:
            total_score[category] += item['score'][category]

    # Overall score card: one column per taxonomy level, averaged per question.
    with st.container():
        st.subheader("Overall Bloom's Taxonomy Scores")
        cols = st.columns(6)
        categories = ['Remembering', 'Understanding', 'Applying',
                      'Analyzing', 'Evaluating', 'Creating']

        for i, (col, category) in enumerate(zip(cols, categories)):
            with col:
                score = round(total_score[category] / len(results), 3)
                # Color thresholds: blue >= 0.7, orange >= 0.4, red below.
                color = "#4a6cf7" if score >= 0.7 else "#ffa500" if score >= 0.4 else "#dc3545"

                st.markdown(f"""
                <div class="score-breakdown">
                    <div class="score-header" style="color: {color}">{category}</div>
                    <div style="font-size: 24px; color: {color};">{score}/1.0</div>
                </div>
                """, unsafe_allow_html=True)

    # Detailed per-question breakdown (note the thresholds here use > not >=).
    with st.expander("View Detailed Question Analysis", expanded=True):
        for idx, item in enumerate(results, 1):
            st.markdown(f'<div class="score-header">Question {idx}: {item["question"]}</div>',
                        unsafe_allow_html=True)

            # Create columns for score display
            cols = st.columns(6)
            for col, category in zip(cols, categories):
                with col:
                    score = round(item['score'][category], 3)
                    color = "#4a6cf7" if score > 0.7 else "#ffa500" if score > 0.3 else "#dc3545"

                    st.markdown(f"""
                    <div style="text-align: center;
                                background-color: #f1f1f1;
                                border-radius: 5px;
                                padding: 5px;
                                margin-bottom: 5px;">
                        <div style="font-weight: bold; color: {color};">{category}</div>
                        <div style="font-size: 18px; color: {color};">{score}/1</div>
                    </div>
                    """, unsafe_allow_html=True)

            # Divider between questions (skipped after the last one).
            if idx < len(results):
                st.markdown("---")
314
+
315
+ if __name__ == "__main__":
316
+ main()
317
+
pages/learning_analytics.py ADDED
@@ -0,0 +1,380 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import json
3
+ import re
4
+ import spacy
5
+ from sklearn.feature_extraction.text import TfidfVectorizer
6
+ from openai import OpenAI
7
+ from dotenv import load_dotenv
8
+ import os
9
+ load_dotenv()
10
+ import subprocess
11
+
12
+
13
+ client = OpenAI(
14
+ api_key=os.getenv("API_KEY"),
15
+ base_url=os.getenv("GENERATOR_BASE_URL")
16
+ )
17
+
18
def ensure_spacy_model():
    """Ensure the required spaCy model is installed.

    Tries to load ``en_core_web_sm`` and, when it is missing (spacy.load
    raises OSError), downloads it with the *current* interpreter. Using
    ``sys.executable`` instead of a bare "python" guarantees the model is
    installed into the environment this app is actually running in (a bare
    "python" on PATH may point at a different install, or not exist at all).
    """
    import sys  # local import: only needed for the download fallback

    model_name = "en_core_web_sm"
    try:
        spacy.load(model_name)
    except OSError:
        print(f"Downloading spaCy model: {model_name}...")
        subprocess.run([sys.executable, "-m", "spacy", "download", model_name], check=True)
26
+
27
def extract_key_components(rubric_text):
    """Dynamically extract key terms from rubric using NLP.

    Args:
        rubric_text: Combined rubric criteria/misconception text.

    Returns:
        list[str]: Lower-cased union of named entities, noun chunks, and the
        top-15 TF-IDF uni/bi-grams found in the rubric.
    """
    ensure_spacy_model()  # Ensure the model is downloaded
    nlp = spacy.load("en_core_web_sm")
    doc = nlp(rubric_text)

    # Named entities and noun phrases give domain-specific multi-word terms.
    entities = set(ent.text.lower() for ent in doc.ents)
    noun_chunks = set(chunk.text.lower() for chunk in doc.noun_chunks)

    # TF-IDF over the single rubric document ranks its most distinctive terms.
    vectorizer = TfidfVectorizer(ngram_range=(1, 2), stop_words='english')
    tfidf_matrix = vectorizer.fit_transform([rubric_text])
    feature_names = vectorizer.get_feature_names_out()
    tfidf_scores = zip(feature_names, tfidf_matrix.toarray()[0])
    top_terms = [term for term, score in sorted(tfidf_scores, key=lambda x: x[1], reverse=True)[:15]]

    return list(entities.union(noun_chunks).union(top_terms))
43
+
44
def evaluate_student_answer(student_answer, rubric):
    """Evaluate a student answer against a rubric using keyword features + LLM.

    Args:
        student_answer: The student's free-text answer.
        rubric: Dict with 'criteria_for_correct_answer' and
            'common_misconceptions' keys (other rubric keys are not read here).

    Returns:
        dict: Parsed LLM JSON with 'score', 'breakdown', and 'feedback';
        a zeroed fallback dict when the API call or JSON parsing fails.
    """
    def preprocess(text):
        # Lower-case and strip punctuation so keyword matching is lenient.
        text = text.lower()
        text = re.sub(r'[^\w\s]', '', text)
        return text.strip()

    cleaned_answer = preprocess(student_answer)
    features = {}

    # Fraction of rubric key terms that appear (as substrings) in the answer.
    rubric_text = f"{rubric['criteria_for_correct_answer']} {rubric['common_misconceptions']}"
    key_terms = extract_key_components(rubric_text)
    features['keyword_coverage'] = sum(1 for term in key_terms if term in cleaned_answer)/len(key_terms)

    # Count named entities in the answer that are also rubric key terms.
    # NOTE(review): this loads the spaCy model on every call — presumably
    # acceptable for this app's traffic; consider caching if it becomes hot.
    doc = spacy.load("en_core_web_sm")(student_answer)
    features['key_entities_present'] = len([ent for ent in doc.ents if ent.text.lower() in key_terms])

    # The computed features are handed to the LLM as context for its grading.
    prompt = f"""Evaluate this answer against the rubric. Consider:
    - Keyword matches: {features.get('keyword_coverage', 0)*100:.1f}%
    - Key entities found: {features.get('key_entities_present', 0)}

    Rubric Criteria:
    {rubric['criteria_for_correct_answer']}

    Common Misconceptions:
    {rubric['common_misconceptions']}

    Student Answer:
    {student_answer}

    Return JSON with score (0-10), breakdown - (accuracy, relevance and completeness), and feedback strictly."""

    try:
        # response_format json_object forces the model to emit parseable JSON.
        response = client.chat.completions.create(
            model=os.getenv("MODEL_NAME"),
            messages=[{"role": "user", "content": prompt}],
            temperature=0.1,
            response_format={"type": "json_object"}
        )
        llm_output = response.choices[0].message.content
        return json.loads(llm_output)
    except Exception as e:
        # Degrade gracefully: the UI still renders with zeroed metrics.
        print(f"API Error: {str(e)}")
        return {
            "score": 0,
            "breakdown": {"accuracy": 0, "relevance": 0, "completeness": 0},
            "feedback": "Evaluation service unavailable"
        }
91
+
92
+ # -------------------- Streamlit UI --------------------
93
+ st.set_page_config(page_title="Answer Evaluation System", layout="wide", page_icon="📘")
94
+
95
+
96
+ st.markdown("""
97
+ <style>
98
+ .stMainBlockContainer {
99
+ padding: 20px 50px;
100
+ }
101
+ .stAppHeader {
102
+ position: relative;
103
+ height: 0;
104
+ }
105
+ </style>
106
+ """, unsafe_allow_html=True)
107
+
108
+
109
+ st.markdown("""
110
+ <style>
111
+ .main {padding: 2rem 3rem;}
112
+ .header {color: #2b3b52; border-bottom: 2px solid #eee;}
113
+ .stTextArea textarea {border: 1px solid #e1e4e8 !important;}
114
+ .score-container {background: #f8f9fa; border-radius: 10px; padding: 25px; margin: 20px 0;}
115
+ .feedback-box {background: #fffbe6; border-left: 4px solid #ffd700; border-radius: 5px; padding: 20px; margin: 25px 0;}
116
+ .metric-box {background: white; border-radius: 8px; padding: 20px; margin: 10px; box-shadow: 0 2px 4px rgba(0,0,0,0.1);}
117
+ .metric-box h1 {color: #2b3b52; margin: 5px 0;}
118
+ .stButton button {transition: all 0.3s ease;}
119
+ .stButton button:hover {transform: translateY(-2px);}
120
+ </style>
121
+ """, unsafe_allow_html=True)
122
+
123
+ EXAMPLES = {
124
+ "Select an example...": {"rubric": {}, "answer": ""},
125
+
126
+ "LDA Analysis": {
127
+ "rubric": {
128
+ "key_concept_assessed": "Understanding of Linear Discriminant Analysis (LDA) as a supervised dimensionality reduction technique and its application in pattern recognition",
129
+ "criteria_for_correct_answer": (
130
+ "A complete answer should:\n"
131
+ "1. Differentiate LDA from PCA in terms of supervision and objective\n"
132
+ "2. Explain the mathematical goal of maximizing between-class variance while minimizing within-class variance\n"
133
+ "3. Describe the assumption of normal distribution and equal class covariance matrices\n"
134
+ "4. Provide real-world applications in fields like bioinformatics or facial recognition"
135
+ ),
136
+ "common_misconceptions": (
137
+ "1. Confusing LDA with Latent Dirichlet Allocation (same acronym)\n"
138
+ "2. Believing LDA is primarily a classification algorithm rather than dimensionality reduction\n"
139
+ "3. Assuming LDA requires no normality assumptions\n"
140
+ "4. Thinking LDA and PCA are interchangeable for unsupervised problems"
141
+ ),
142
+ "cognitive_skill_tested": (
143
+ "Analysis: Requires breaking down LDA's mathematical framework\n"
144
+ "Evaluation: Comparing/contrasting with similar techniques like PCA\n"
145
+ "Application: Demonstrating understanding through practical use cases"
146
+ )
147
+ },
148
+ "answer": (
149
+ "Linear Discriminant Analysis (LDA) is a supervised dimensionality reduction technique that maximizes class separability by:\n\n"
150
+ "1. Calculating between-class and within-class scatter matrices\n"
151
+ "2. Finding linear combinations of features that maximize Fisher's ratio: (between-class variance)/(within-class variance)\n"
152
+ "3. Assuming multivariate normal distributions with equal covariance across classes\n\n"
153
+ "Key applications include:\n"
154
+ "- Preprocessing for classification tasks in speech recognition\n"
155
+ "- Gene expression analysis in bioinformatics\n"
156
+ "- Feature extraction in computer vision systems\n\n"
157
+ "Unlike PCA which maximizes variance without class information, LDA explicitly uses class labels to find discriminative directions."
158
+ )
159
+ },
160
+
161
+ "Climate Change Basics": {
162
+ "rubric": {
163
+ "key_concept_assessed": "Understanding of anthropogenic climate change mechanisms and evidence-based reasoning",
164
+ "criteria_for_correct_answer": (
165
+ "An exemplary response must:\n"
166
+ "1. Identify main greenhouse gases (CO₂, CH₄, N₂O) and their sources\n"
167
+ "2. Explain the enhanced greenhouse effect using radiative forcing\n"
168
+ "3. Distinguish between natural climate variability and anthropogenic forcing\n"
169
+ "4. Reference IPCC assessment reports and paleoclimate evidence"
170
+ ),
171
+ "common_misconceptions": (
172
+ "1. Equating ozone depletion with climate change\n"
173
+ "2. Attributing current warming solely to solar cycles\n"
174
+ "3. Confusing weather variability with long-term climate trends\n"
175
+ "4. Overemphasizing natural CO₂ sources while ignoring anthropogenic contributions"
176
+ ),
177
+ "cognitive_skill_tested": (
178
+ "Comprehension: Interpreting climate proxies and modern observations\n"
179
+ "Evaluation: Assessing credibility of different evidence types\n"
180
+ "Synthesis: Integrating physical, chemical, and biological data"
181
+ )
182
+ },
183
+ "answer": (
184
+ "Modern climate change is primarily driven by human activities through:\n\n"
185
+ "1. Fossil fuel combustion (75% of CO₂ emissions)\n"
186
+ "2. Agricultural practices (40% of CH₄ from livestock and rice paddies)\n"
187
+ "3. Deforestation reducing carbon sinks (12-17% of anthropogenic emissions)\n\n"
188
+ "Key evidence includes:\n"
189
+ "- 50% increase in atmospheric CO₂ since 1750 (415 ppm vs 280 ppm pre-industrial)\n"
190
+ "- Isotopic fingerprint showing fossil fuel origin of CO₂ increase\n"
191
+ "- Stratospheric cooling/tropospheric warming pattern characteristic of greenhouse forcing\n"
192
+ "- Observed sea level rise (3.7 mm/yr) matching model predictions\n\n"
193
+ "Natural factors like solar irradiance and volcanic activity cannot explain the current warming trend (IPCC AR6)."
194
+ )
195
+ },
196
+
197
+ "Market Equilibrium": {
198
+ "rubric": {
199
+ "key_concept_assessed": "Understanding of price mechanism and market adjustment processes",
200
+ "criteria_for_correct_answer": (
201
+ "A strong answer should:\n"
202
+ "1. Define equilibrium price/quantity using supply-demand curves\n"
203
+ "2. Analyze effects of price floors/ceilings with real examples\n"
204
+ "3. Explain elasticity's role in tax incidence\n"
205
+ "4. Distinguish between short-run and long-run adjustments"
206
+ ),
207
+ "common_misconceptions": (
208
+ "1. Believing equilibrium implies no transactions\n"
209
+ "2. Assuming price controls benefit all consumers/producers\n"
210
+ "3. Confusing movement along curves with shift of curves\n"
211
+ "4. Thinking elasticity is constant across price ranges"
212
+ ),
213
+ "cognitive_skill_tested": (
214
+ "Application: Using graphical models to predict market outcomes\n"
215
+ "Evaluation: Assessing welfare impacts of policy interventions\n"
216
+ "Synthesis: Connecting abstract models to real-world markets"
217
+ )
218
+ },
219
+ "answer": (
220
+ "Market equilibrium occurs when:\n\n"
221
+ "Qd(P) = Qs(P)\n\n"
222
+ "Key concepts:\n"
223
+ "1. Price ceiling (e.g., rent control) creates shortages when below equilibrium\n"
224
+ "2. Price floor (e.g., minimum wage) creates surpluses when above equilibrium\n"
225
+ "3. Tax incidence depends on relative elasticity - inelastic side bears more burden\n\n"
226
+ "Adjustment process:\n"
227
+ "- Short-run: Inventory changes and queuing\n"
228
+ "- Long-run: Entry/exit of firms and technological adaptation\n\n"
229
+ "Example: Gasoline taxes largely borne by consumers due to inelastic demand."
230
+ )
231
+ }
232
+ }
233
+
234
+ def main():
235
+ # Session State Initialization
236
+ if 'rubric' not in st.session_state:
237
+ st.session_state.rubric = {}
238
+ if 'answer' not in st.session_state:
239
+ st.session_state.answer = ""
240
+
241
+ # Back Button
242
+ if st.button("← Back to Dashboard", key="back_btn"):
243
+ st.switch_page("app.py")
244
+
245
+ # Page Header
246
+ st.markdown("<h1 class='header'>📚 Automated Answer Evaluation System</h1>", unsafe_allow_html=True)
247
+
248
+ # Example Selector
249
+ selected_example = st.selectbox("Load example scenario:", options=list(EXAMPLES.keys()))
250
+
251
+ # Handle Example Selection
252
+ if selected_example == "Select an example...":
253
+ st.session_state.rubric = {}
254
+ st.session_state.answer = ""
255
+ else:
256
+ example = EXAMPLES[selected_example]
257
+ st.session_state.rubric = example["rubric"]
258
+ st.session_state.answer = example["answer"]
259
+
260
+ # Rubric Input Section
261
+ with st.expander("🎯 Rubric Input", expanded=True):
262
+ col1, col2 = st.columns(2)
263
+ with col1:
264
+ key_concept = st.text_area(
265
+ "Key Concept Assessed",
266
+ value=st.session_state.rubric.get("key_concept_assessed", ""),
267
+ placeholder="What key concept is being assessed?",
268
+ height=150
269
+ )
270
+ criteria = st.text_area(
271
+ "Criteria for Correct Answer",
272
+ value=st.session_state.rubric.get("criteria_for_correct_answer", ""),
273
+ placeholder="What defines a correct answer?",
274
+ height=150
275
+ )
276
+ with col2:
277
+ misconceptions = st.text_area(
278
+ "Common Misconceptions",
279
+ value=st.session_state.rubric.get("common_misconceptions", ""),
280
+ placeholder="What common errors should be watched for?",
281
+ height=150
282
+ )
283
+ cognitive_skill = st.text_area(
284
+ "Cognitive Skill Tested",
285
+ value=st.session_state.rubric.get("cognitive_skill_tested", ""),
286
+ placeholder="Which cognitive skills are being tested?",
287
+ height=150
288
+ )
289
+
290
+ # Student Answer Section
291
+ student_answer = st.text_area(
292
+ "📝 Student Answer",
293
+ value=st.session_state.answer,
294
+ placeholder="Paste the student's answer here...",
295
+ height=300
296
+ )
297
+
298
+ # Action Buttons
299
+ col1, col2, col3 = st.columns([1,1,2])
300
+ with col1:
301
+ if st.button("🧹 Clear All", use_container_width=True):
302
+ st.session_state.rubric = {}
303
+ st.session_state.answer = ""
304
+ st.rerun()
305
+ with col2:
306
+ evaluate_btn = st.button("🔍 Evaluate Answer", use_container_width=True)
307
+
308
+ # Evaluation Logic
309
+ if evaluate_btn:
310
+ if not all([key_concept, criteria, misconceptions, cognitive_skill]) or not student_answer:
311
+ st.warning("❗ Please complete all rubric fields and provide a student answer!")
312
+ return
313
+
314
+ rubric = {
315
+ "key_concept_assessed": key_concept,
316
+ "criteria_for_correct_answer": criteria,
317
+ "common_misconceptions": misconceptions,
318
+ "cognitive_skill_tested": cognitive_skill
319
+ }
320
+
321
+ with st.spinner("🔍 Analyzing answer..."):
322
+ try:
323
+ result = evaluate_student_answer(student_answer, rubric)
324
+
325
+ # Results Display
326
+ st.markdown("---")
327
+ st.markdown("<h2 style='color: #2b3b52'>Evaluation Results</h2>", unsafe_allow_html=True)
328
+
329
+ # Score Container
330
+ with st.container():
331
+ st.markdown(f"""
332
+ <div class='score-container'>
333
+ <h2>Overall Score: {result.get('score', 0)}/10</h2>
334
+ </div>
335
+ """, unsafe_allow_html=True)
336
+
337
+ # Metrics
338
+ cols = st.columns(3)
339
+ metrics = result.get('breakdown', {})
340
+ with cols[0]:
341
+ st.markdown(f"""
342
+ <div class='metric-box'>
343
+ <h4>📐 Accuracy</h4>
344
+ <h1>{metrics.get('accuracy', 0)}</h1>
345
+ </div>
346
+ """, unsafe_allow_html=True)
347
+ with cols[1]:
348
+ st.markdown(f"""
349
+ <div class='metric-box'>
350
+ <h4>✅ Completeness</h4>
351
+ <h1>{metrics.get('completeness', 0)}</h1>
352
+ </div>
353
+ """, unsafe_allow_html=True)
354
+ with cols[2]:
355
+ st.markdown(f"""
356
+ <div class='metric-box'>
357
+ <h4>🎯 Relevance</h4>
358
+ <h1>{metrics.get('relevance', 0)}</h1>
359
+ </div>
360
+ """, unsafe_allow_html=True)
361
+
362
+ # Feedback
363
+ st.markdown(f"""
364
+ <div class='feedback-box'>
365
+ <h4>📌 Detailed Feedback</h4>
366
+ <p>{result.get('feedback', 'No feedback available')}</p>
367
+ </div>
368
+ """, unsafe_allow_html=True)
369
+
370
+ # Raw JSON
371
+ with st.expander("View Raw JSON Output"):
372
+ st.json(result)
373
+
374
+ except Exception as e:
375
+ st.error(f"🚨 Evaluation Error: {str(e)}")
376
+
377
+ if __name__ == "__main__":
378
+ main()
379
+
380
+
pages/question_paper.py ADDED
@@ -0,0 +1,583 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import streamlit as st
2
+ import json
3
+ import os
4
+ from datetime import datetime
5
+ from reportlab.lib.pagesizes import letter
6
+ from reportlab.pdfgen import canvas
7
+ from reportlab.lib import colors
8
+ from reportlab.lib.utils import simpleSplit
9
+ import random
10
+ from helper import generate_questions_from_file
11
+ from dotenv import load_dotenv
12
+ import os
13
+ load_dotenv()
14
+
15
+ # Page Configuration
16
+ st.set_page_config(page_title="Question Paper Generator", page_icon="📝", layout="wide")
17
+
18
+ st.markdown("""
19
+ <style>
20
+ .stMainBlockContainer {
21
+ padding: 20px 50px;
22
+ }
23
+ .stAppHeader {
24
+ position: relative;
25
+ height: 0;
26
+ }
27
+ </style>
28
+ """, unsafe_allow_html=True)
29
+
30
+ # Custom CSS for Enhanced Styling
31
+ st.markdown("""
32
+ <style>
33
+ :root {
34
+ --primary: #2E7D32;
35
+ --secondary: #388E3C;
36
+ --accent: #81C784;
37
+ --light: #E8F5E9;
38
+ --dark: #1B5E20;
39
+ }
40
+
41
+
42
+
43
+ .section-header {
44
+ font-size: 1.4rem;
45
+ font-weight: 600;
46
+ color: var(--primary);
47
+ margin: 1.5rem 0 1rem 0;
48
+ padding-bottom: 0.5rem;
49
+ border-bottom: 2px solid var(--accent);
50
+ }
51
+
52
+ .question-card {
53
+ padding: 1.5rem;
54
+ margin: 1rem 0;
55
+ background: #FFFFFF;
56
+ border-radius: 10px;
57
+ box-shadow: 0 2px 6px rgba(0,0,0,0.1);
58
+ border-left: 4px solid var(--accent);
59
+ transition: all 0.3s ease;
60
+ }
61
+
62
+ .question-card:hover {
63
+ transform: translateY(-2px);
64
+ box-shadow: 0 4px 12px rgba(0,0,0,0.15);
65
+ }
66
+
67
+ .selected-question {
68
+ background: var(--light) !important;
69
+ border-left: 4px solid var(--primary) !important;
70
+ }
71
+
72
+ .stSelectbox, .stTextArea, .stNumberInput {
73
+ margin-bottom: 1.5rem;
74
+ }
75
+
76
+ .download-btn {
77
+ background: var(--primary) !important;
78
+ color: white !important;
79
+ border: none !important;
80
+ border-radius: 8px !important;
81
+ padding: 10px 25px !important;
82
+ transition: all 0.3s ease !important;
83
+ }
84
+
85
+ .download-btn:hover {
86
+ background: var(--secondary) !important;
87
+ transform: scale(1.05);
88
+ }
89
+
90
+ .rubric-expander {
91
+ margin-top: 1rem;
92
+ border-left: 3px solid var(--accent);
93
+ padding-left: 1rem;
94
+ }
95
+ </style>
96
+ """, unsafe_allow_html=True)
97
+
98
+ # Back Button
99
+ if st.button("← Back to Dashboard", key="back_btn"):
100
+ st.switch_page("app.py")
101
+
102
+ st.title("📝 Question Paper Generation")
103
+
104
+ # Add intro instructions
105
+ st.markdown("""
106
+ Welcome to the Question Paper Generator! Follow these steps to create your custom question paper:
107
+ 1. Upload your study material in PDF format
108
+ 2. Enter specific query or topic for questions (optional)
109
+ 3. Select the number and types of questions
110
+ 4. Adjust the cognitive levels using Bloom's Taxonomy
111
+ 5. Choose difficulty level
112
+ 6. Generate and select questions
113
+ 7. Download your question paper
114
+ """)
115
+
116
+ # Initialize session state variables
117
+ if 'questions' not in st.session_state:
118
+ st.session_state.questions = None
119
+ if 'accepted_questions' not in st.session_state:
120
+ st.session_state.accepted_questions = []
121
+ if 'generated' not in st.session_state:
122
+ st.session_state.generated = False
123
+ if 'loading' not in st.session_state:
124
+ st.session_state.loading = False
125
+
126
+ # File to store resources
127
+ RESOURCE_FILE = "resources.json"
128
+
129
def load_resources():
    """Load the saved resource list from RESOURCE_FILE.

    Returns:
        list: Previously saved resources, or [] when the file does not exist
        or contains invalid JSON — a corrupt/truncated resources.json should
        not crash the whole Streamlit page.
    """
    if os.path.exists(RESOURCE_FILE):
        try:
            with open(RESOURCE_FILE, "r") as f:
                return json.load(f)
        except json.JSONDecodeError:
            # Treat a corrupt file the same as a missing one.
            return []
    return []
134
+
135
def save_resources(resources):
    """Persist *resources* to RESOURCE_FILE as pretty-printed JSON."""
    with open(RESOURCE_FILE, "w") as fh:
        json.dump(resources, fh, indent=4)
138
+
139
# --- Step 1: upload the source PDF -------------------------------------------
st.markdown("<div class='section-header'>📚 Step 1: Upload Resource</div>", unsafe_allow_html=True)
st.markdown("Upload your study material in PDF format. This will be used as the source for generating questions.")

uploaded_file = st.file_uploader("Upload PDF", type=["pdf"])
if uploaded_file:
    # Persist the upload to disk so the generator can read it back by path.
    UPLOADS_DIR = "uploads"
    os.makedirs(UPLOADS_DIR, exist_ok=True)
    file_path = os.path.join(UPLOADS_DIR, uploaded_file.name)
    with open(file_path, "wb") as f:
        f.write(uploaded_file.getbuffer())

# --- Step 2: optional focus query --------------------------------------------
st.markdown("<div class='section-header'>🔍 Step 2: Enter Your Query</div>", unsafe_allow_html=True)
st.markdown("Specify any particular topic or concept you want to focus on. Leave blank to generate questions from the entire material.")
query = st.text_area("Enter your query here", height=120)
157
+
158
# --- Step 3: paper pattern ---------------------------------------------------
st.markdown("<div class='section-header'>📄 Step 3: Choose Paper Pattern</div>", unsafe_allow_html=True)
st.markdown("Select the number of questions for each type. The total should not exceed 8 questions.")
st.warning("⚠️ Note: Due to compute resource limitations, please limit the total number of questions to 8 or fewer.")

col1, col2, col3 = st.columns(3)
with col1:
    num_mcq = st.number_input("MCQs", min_value=0, value=3)
with col2:
    num_tf = st.number_input("True/False", min_value=0, value=3)
with col3:
    num_qa = st.number_input("Q&A", min_value=0, value=2)

total_questions = num_mcq + num_tf + num_qa
# BUG FIX: the limit advertised in the text and warning above is 8 questions,
# but the original check only fired above 10 — enforce the documented limit.
if total_questions > 8:
    st.error("❌ Total number of questions cannot exceed 8. Please adjust your selection.")
174
+
175
# --- Step 4: Bloom's taxonomy weights ----------------------------------------
st.markdown("<div class='section-header'>🎯 Step 4: Set Bloom’s Taxonomy Weights</div>", unsafe_allow_html=True)
st.markdown("Adjust the cognitive level distribution of your questions. The total of all weights should sum to 100.")
col1, col2 = st.columns(2)
with col1:
    # Lower-order cognitive levels in the left column.
    bloom_weights = {
        "Remembering": st.slider("Remembering", 0, 100, 20,
                                 help="Recall facts and basic concepts"),
        "Understanding": st.slider("Understanding", 0, 100, 20,
                                   help="Explain ideas and concepts"),
        "Applying": st.slider("Applying", 0, 100, 20,
                              help="Use information in new situations"),
    }
with col2:
    # Higher-order cognitive levels in the right column.
    bloom_weights.update({
        "Analyzing": st.slider("Analyzing", 0, 100, 20,
                               help="Draw connections among ideas"),
        "Evaluating": st.slider("Evaluating", 0, 100, 10,
                                help="Justify decisions through evidence"),
        "Creating": st.slider("Creating", 0, 100, 10,
                              help="Produce original work"),
    })

# --- Step 5: difficulty ------------------------------------------------------
st.markdown("<div class='section-header'>📊 Step 5: Set Difficulty Level</div>", unsafe_allow_html=True)
st.markdown("Choose the overall difficulty level for your question paper.")
level = st.select_slider("Select Difficulty Level",
                         options=["Easy", "Medium", "Hard"],
                         value="Medium")
204
+
205
def generate_pdf_with_rubric(questions, filename="questions_with_rubric.pdf"):
    """Render *questions* (with answers and rubrics) to a PDF file.

    Each entry of *questions* is a dict that may contain 'question' or
    'statement', optional 'options' (MCQ), optional 'correct_answer' and an
    optional 'rubric' mapping.  Returns *filename* on success, None on error.
    """
    try:
        c = canvas.Canvas(filename, pagesize=letter)
        width, height = letter  # page size
        y_position = height - 50  # current vertical cursor, top-down

        # Title
        c.setFont("Helvetica-Bold", 16)
        c.drawString(150, y_position, "Question Paper with Rubrics & Solutions")
        y_position -= 30

        for idx, q in enumerate(questions, 1):
            if y_position < 100:  # start a new page when space runs out
                c.showPage()
                c.setFont("Helvetica", 12)
                y_position = height - 50

            # Question text (MCQ/Q&A use 'question', True/False uses 'statement')
            c.setFont("Helvetica-Bold", 12)
            question_text = f"Q{idx}: {q.get('question', q.get('statement', 'Question'))}"
            for line in simpleSplit(question_text, "Helvetica-Bold", 12, width - 100):
                c.drawString(50, y_position, line)
                y_position -= 15

            c.setFont("Helvetica", 12)

            # Options for MCQ
            if 'options' in q:
                for opt, text in q['options'].items():
                    for line in simpleSplit(f"   {opt}. {text}", "Helvetica", 12, width - 100):
                        c.drawString(70, y_position, line)
                        y_position -= 15

                # Correct answer (in green).  BUG FIX: use .get() so an MCQ
                # entry without 'correct_answer' no longer raises KeyError.
                c.setFillColor(colors.green)
                c.drawString(70, y_position, f"   Correct Answer: {q.get('correct_answer', '')}")
                y_position -= 20
                c.setFillColor(colors.black)

            # Correct answer for True/False (entries without options)
            if 'correct_answer' in q and 'options' not in q:
                c.setFillColor(colors.green)
                c.drawString(70, y_position, f"   Correct Answer: {q['correct_answer']}")
                y_position -= 20
                c.setFillColor(colors.black)

            # Rubric Section.  BUG FIX: the original indexed q['rubric']
            # unconditionally and crashed on questions that carry no rubric.
            c.setFont("Helvetica-Oblique", 10)
            c.setFillColor(colors.grey)
            c.drawString(50, y_position, "Rubric:")
            y_position -= 15

            for key, value in q.get('rubric', {}).items():
                for line in simpleSplit(f"{key}: {value}", "Helvetica-Oblique", 10, width - 100):
                    c.drawString(70, y_position, line)
                    y_position -= 12

            y_position -= 10  # space before next question
            c.setFont("Helvetica", 12)
            c.setFillColor(colors.black)

        c.save()
        return filename
    except Exception as e:
        print(f"Error generating PDF: {e}")
        return None
276
+
277
+
278
def generate_pdf(questions, filename="questions.pdf"):
    """Write a plain question paper (question text only) to *filename*.

    Returns *filename* on success, or None if PDF generation failed.
    """
    try:
        pdf = canvas.Canvas(filename, pagesize=letter)
        page_w, page_h = letter
        y = page_h - 50  # vertical cursor, top-down

        # Title
        pdf.setFont("Helvetica-Bold", 16)
        pdf.drawString(200, y, "Generated Questions")
        y -= 30

        for number, text in enumerate(questions, 1):
            if y < 100:  # not enough room left: start a fresh page
                pdf.showPage()
                pdf.setFont("Helvetica", 12)
                y = page_h - 50

            pdf.setFont("Helvetica", 12)
            for line in simpleSplit(f"Q{number}: {text}", "Helvetica", 12, page_w - 100):
                pdf.drawString(50, y, line)
                y -= 15

        pdf.save()
        return filename
    except Exception as exc:
        print(f"Error generating PDF: {exc}")
        return None
307
+
308
def generate_dummy_scores(num_questions=5):
    """Produce placeholder Bloom-taxonomy scores for *num_questions* questions.

    Each entry carries a sample question string and, for every Bloom category,
    a random score in [0.2, 0.95] rounded to 2 decimals.
    """
    categories = ['Remembering', 'Understanding', 'Applying',
                  'Analyzing', 'Evaluating', 'Creating']
    return [
        {
            'question': f"Sample question about concept {i+1} in quantum mechanics?",
            'score': {cat: round(random.uniform(0.2, 0.95), 2) for cat in categories},
        }
        for i in range(num_questions)
    ]
324
+
325
def parse_question_response(response):
    """Normalize a raw generator *response* into a consistent structure.

    Returns {'mcq': [...], 'tf': [...], 'qna': [...]}.  MCQ entries may be
    plain dicts or (dict, ...) tuples; True/False entries are keyed by
    'statement'; Q&A entries may be nested under a 'questions' list.
    Missing fields default to empty values.
    """
    normalized = {'mcq': [], 'tf': [], 'qna': []}

    # MCQ: unwrap the optional tuple wrapper, then standardize the fields.
    for raw in response.get('mcq', []):
        data = raw[0] if isinstance(raw, tuple) else raw
        normalized['mcq'].append({
            'question': data.get('question', ''),
            'options': data.get('options', {}),
            'correct_answer': data.get('correct_answer', ''),
            'rubric': data.get('rubric', {}),
            'type': 'mcq',
        })

    # True/False: rename 'statement' to 'question' for a uniform interface.
    for raw in response.get('tf', []):
        normalized['tf'].append({
            'question': raw.get('statement', ''),
            'correct_answer': raw.get('correct_answer', ''),
            'rubric': raw.get('rubric', {}),
            'type': 'tf',
        })

    # Q&A: flatten the optional {'questions': [...]} nesting.
    for raw in response.get('qna', []):
        data = raw['questions'][0] if 'questions' in raw else raw
        normalized['qna'].append({
            'question': data.get('question', ''),
            'rubric': data.get('rubric', {}),
            'type': 'qna',
        })

    return normalized
377
+
378
def generate_questions(filename, mcq, tf, qna, create, evaluate, analyze, apply, understand, remember, level):
    """Generate questions from the uploaded file, or return a canned sample.

    The six Bloom weights are normalized to fractions of their total and the
    difficulty *level* ("Easy"/"Medium"/"Hard") is mapped to 1/2/3 before the
    request is forwarded to ``generate_questions_from_file``.  When *filename*
    is falsy, a fixed demo response is returned instead.
    """
    if filename:
        file_path = os.path.join("uploads", filename)
        # BUG FIX: the accumulator was named `sum`, shadowing the builtin.
        # Also guard against a zero total so setting every Bloom slider to 0
        # no longer raises ZeroDivisionError (fall back to equal weights).
        total_weight = create + evaluate + analyze + apply + understand + remember
        if total_weight == 0:
            create = evaluate = analyze = apply = understand = remember = 1 / 6
        else:
            create /= total_weight
            evaluate /= total_weight
            analyze /= total_weight
            apply /= total_weight
            understand /= total_weight
            remember /= total_weight

        # Map the human-readable difficulty to the generator's 1-3 scale.
        level = level.lower()
        if level == "easy":
            level = 1
        elif level == "medium":
            level = 2
        else:
            level = 3

        questions = generate_questions_from_file(file_path, mcq, tf, qna, create, evaluate, analyze, apply, understand, remember, level)
        parsed_questions = parse_question_response(questions)
        return parsed_questions
    else:
        # Demo fallback used when no file is available.
        return {'mcq': [{'question': 'What is the primary goal of using generalized additive models in regression analysis?',
                         'options': {'A': 'To assume a linear relationship between the predictors and the response variable',
                                     'B': 'To identify and characterize nonlinear regression effects',
                                     'C': 'To reduce the dimensionality of the predictor space',
                                     'D': 'To assume a specific distribution for the response variable'},
                         'correct_answer': 'B',
                         'rubric': {'key_concept_assessed': 'Understanding of generalized additive models',
                                    'criteria_for_correct_answer': 'The correct answer is B because generalized additive models are used to model nonlinear relationships between predictors and the response variable, allowing for more flexibility in the analysis.',
                                    'common_misconceptions': 'Assuming linearity, assuming a specific distribution, or reducing dimensionality are not the primary goals of generalized additive models',
                                    'cognitive_skill_tested': "This question requires the ability to understand the purpose and application of generalized additive models, which aligns with the 'Understanding' category of Bloom's Taxonomy."}}],
                'tf': [{'statement': 'The nearest shrunken centroids method uses all of the features (genes) and does not perform automatic feature selection.',
                        'correct_answer': False,
                        'rubric': {'key_concept_assessed': 'Nearest Shrunken Centroids (NSC) method for classification',
                                   'criteria_for_correct_answer': 'The NSC method is designed to automatically select features by shrinking the classwise mean toward the overall mean for each feature, resulting in the removal of features with zero shrunken values.',
                                   'common_misconceptions': 'Believing that NSC does not perform feature selection or assuming it uses all features without modification.',
                                   'cognitive_skill_tested': 'Analysis, as the question requires understanding the mechanism of the NSC method and its implications on feature selection.'}}],
                'qna': [{'question': 'Explain how the concept of the false discovery rate (FDR) is used in multiple hypothesis testing in the context of microarray analysis. Discuss the advantages and limitations of this approach, providing examples to illustrate your points. Consider how the choice of the FDR threshold affects the results and how it can impact the interpretation of the findings in a high-dimensional data setting.',
                         'rubric': {'key_concept_assessed': 'Understanding of the false discovery rate (FDR) and its application in multiple hypothesis testing, particularly in the context of microarray analysis',
                                    'criteria_for_correct_answer': 'A correct answer should include a clear definition of FDR, its advantages in controlling the number of false positives, limitations such as dependence on the chosen threshold, and examples illustrating its application in microarray analysis. The answer should also discuss how different thresholds can lead to different conclusions and the importance of considering this in high-dimensional data analysis',
                                    'common_misconceptions': 'Confusing FDR with family-wise error rate (FWER), not considering the impact of the chosen threshold on the results, and overlooking the assumptions underlying the FDR method',
                                    'cognitive_skill_tested': 'This question requires the ability to analyze complex statistical concepts, evaluate their strengths and limitations, and apply them to practical problems, which aligns with the higher-order thinking skills of analysis, evaluation, and synthesis in Bloom’s Taxonomy'}}]}
424
+
425
# --- Generate Paper button ---------------------------------------------------
# Disabled while a previous generation run is in flight.
if st.button("🎯 Generate Question Paper", key="generate_btn", disabled=st.session_state.loading):
    if not uploaded_file:
        st.error("Please upload a valid resource first!")
    else:
        with st.spinner('Generating questions... Please wait...'):
            st.session_state.loading = True
            # Weights are passed in the generator's expected order:
            # create, evaluate, analyze, apply, understand, remember.
            st.session_state.questions = generate_questions(
                uploaded_file.name, num_mcq, num_tf, num_qa,
                bloom_weights["Creating"], bloom_weights["Evaluating"],
                bloom_weights["Analyzing"], bloom_weights["Applying"],
                bloom_weights["Understanding"], bloom_weights["Remembering"], level,
            )
            st.session_state.generated = True
            st.session_state.accepted_questions = []
            st.session_state.loading = False
            st.rerun()
442
+
443
# --- Review & select generated questions -------------------------------------
if st.session_state.generated and st.session_state.questions:
    st.markdown("---")
    st.subheader("Generated Questions")

    with st.form(key='question_selection'):
        tabs = st.tabs(["MCQ", "True/False", "Q&A"])

        # Multiple-choice questions
        with tabs[0]:
            for idx, q in enumerate(st.session_state.questions['mcq'], 1):
                col1, col2 = st.columns([4, 1])
                with col1:
                    # Entries may arrive wrapped in a tuple; unwrap to the dict.
                    question_data = q[0] if isinstance(q, tuple) else q

                    st.write(f"**Q{idx}:** {question_data['question']}")
                    for opt, text in question_data['options'].items():
                        st.write(f"{opt}. {text}")

                    st.write(f"Correct Answer: {question_data['correct_answer']}")
                    with st.expander("Rubric"):
                        st.json(question_data['rubric'])
                    create, evaluate, analyze, apply, understand, remember = generate_dummy_scores(num_questions=1)[0]['score'].values()

                with col2:
                    # Checkbox toggles membership in the accepted list.
                    if st.checkbox(f"Select MCQ {idx}", key=f"mcq_{idx}"):
                        if q not in st.session_state.accepted_questions:
                            st.session_state.accepted_questions.append(q)
                    elif q in st.session_state.accepted_questions:
                        st.session_state.accepted_questions.remove(q)

        # True/False questions
        with tabs[1]:
            for idx, q in enumerate(st.session_state.questions['tf'], 1):
                col1, col2 = st.columns([4, 1])
                with col1:
                    st.write(f"**Q{idx}:** {q['question']}")
                    st.write(f"Correct Answer: {q['correct_answer']}")
                    with st.expander("Rubric"):
                        st.json(q['rubric'])
                    create, evaluate, analyze, apply, understand, remember = generate_dummy_scores(num_questions=1)[0]['score'].values()
                with col2:
                    if st.checkbox(f"Select TF {idx}", key=f"tf_{idx}"):
                        if q not in st.session_state.accepted_questions:
                            st.session_state.accepted_questions.append(q)
                    elif q in st.session_state.accepted_questions:
                        st.session_state.accepted_questions.remove(q)

        # Open-ended Q&A questions
        with tabs[2]:
            if st.session_state.questions.get('qna'):
                for idx, q_wrapper in enumerate(st.session_state.questions['qna'], 1):
                    col1, col2 = st.columns([4, 1])
                    with col1:
                        if 'questions' in q_wrapper:
                            # Nested structure: show the first inner question.
                            q = q_wrapper['questions'][0]
                            st.write(f"**Q{idx}:** {q['question']}")
                            with st.expander("Rubric"):
                                st.json(q['rubric'])
                        else:
                            # Flat structure fallback.
                            q = q_wrapper
                            st.write(f"**Q{idx}:** {q.get('question', 'No question available')}")
                            if 'rubric' in q:
                                with st.expander("Rubric"):
                                    st.json(q['rubric'])

                    with col2:
                        if st.checkbox(f"Select Q&A {idx}", key=f"qna_{idx}"):
                            if q_wrapper not in st.session_state.accepted_questions:
                                st.session_state.accepted_questions.append(q_wrapper)
                        elif q_wrapper in st.session_state.accepted_questions:
                            st.session_state.accepted_questions.remove(q_wrapper)

        st.form_submit_button("Update Selection")
522
+
523
# --- Accepted questions & PDF downloads --------------------------------------
if st.session_state.accepted_questions:
    st.markdown("---")
    st.subheader("✅ Accepted Questions")

    # List the selected questions; unwrap tuples and fall back from
    # 'question' to 'statement' for True/False entries.
    for idx, q in enumerate(st.session_state.accepted_questions, 1):
        question_data = q[0] if isinstance(q, tuple) else q
        question_text = question_data.get('question', question_data.get('statement', 'Question'))
        st.write(f"{idx}. {question_text}")

    col1, col2 = st.columns(2)

    with col1:
        # Basic PDF: question text only.
        plain_questions = []
        for q in st.session_state.accepted_questions:
            if isinstance(q, tuple):
                plain_questions.append(q[0].get('question', ''))
            else:
                plain_questions.append(q.get('question', q.get('statement', '')))

        if plain_questions:
            filename = generate_pdf(plain_questions)
            if filename:
                with open(filename, "rb") as f:
                    pdf_data = f.read()
                st.download_button(
                    label="⬇️ Download Basic Questions",
                    data=pdf_data,
                    file_name="questions.pdf",
                    mime="application/pdf"
                )

    with col2:
        # Full PDF: questions plus answers and rubrics.
        full_questions = [q[0] if isinstance(q, tuple) else q
                          for q in st.session_state.accepted_questions]

        if full_questions:
            filename = generate_pdf_with_rubric(full_questions)
            if filename:
                with open(filename, "rb") as f:
                    pdf_data = f.read()
                st.download_button(
                    label="⬇️ Download Full Version",
                    data=pdf_data,
                    file_name="questions_with_rubric.pdf",
                    mime="application/pdf"
                )
579
+
580
+ # client = OpenAI(
581
+ #     api_key=os.getenv("API_KEY"),
582
+ # base_url=os.getenv("GENERATOR_BASE_URL")
583
+ # )
requirements.txt ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
+ spacy
2
+ streamlit
3
+ python-dotenv
4
+ openai
5
+ scikit-learn
6
+ reportlab
7
+ langchain
8
+ pdfplumber
9
+ pandas
10
+ numpy
resources.json ADDED
@@ -0,0 +1 @@
 
 
1
+ []