Files changed (11) hide show
  1. .env +10 -0
  2. answer_generation.py +167 -0
  3. app.py +177 -0
  4. compose.yaml +48 -0
  5. controller.py +30 -0
  6. db.py +80 -0
  7. dockerfile +15 -0
  8. llm_pipeline.py +13 -0
  9. prompts.py +79 -0
  10. question_generation.py +98 -0
  11. requirements.txt +21 -0
.env ADDED
@@ -0,0 +1,10 @@
 
 
 
 
 
 
 
 
 
 
 
1
# SECURITY: real credentials were committed in this file — rotate the Google
# API key and the MongoDB password immediately, and keep .env out of version
# control (add it to .gitignore; commit a .env.example instead).
GOOGLE_API_KEY=<your-google-api-key>
MONGO_URL=mongodb+srv://<db-user>:<db-password>@reasoningdata.4sroyx1.mongodb.net/?retryWrites=true&w=majority&appName=ReasoningData

FASTAPI_URL=http://127.0.0.1:8000/generate

LLM_MODEL=gemini-2.5-pro

# No spaces around "=" and no quotes: docker-compose env_file parsing is
# stricter than python-dotenv and would otherwise mangle these values.
QUESTION_GENERATION_URL=http://question_service:9000
ANSWER_GENERATION_URL=http://answer_service:8000
answer_generation.py ADDED
@@ -0,0 +1,167 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
from dotenv import load_dotenv
from pydantic import BaseModel
import google.generativeai as genai
from fastapi import FastAPI, HTTPException
import uvicorn
from prompts import PROMPTS
from llm_pipeline import example_odia_answer_json, example_odia_question_json

# Setup: pull GOOGLE_API_KEY / LLM_MODEL from .env into the process env.
load_dotenv()

# Check for required environment variables — fail fast at import time rather
# than on the first model call.
google_api_key = os.getenv("GOOGLE_API_KEY")
if not google_api_key:
    raise ValueError("GOOGLE_API_KEY not found in environment variables")

genai.configure(api_key=google_api_key)
# Model name is configurable via LLM_MODEL; "gemini-pro" is the fallback.
model = genai.GenerativeModel(os.getenv("LLM_MODEL", "gemini-pro"))
# Every generated answer/reasoning is produced in this language.
LANGUAGE = "Odia"
22
+
23
# Models
class QuestionRequest(BaseModel):
    # Request body for POST /generate: one Odia question to answer.
    question: str

class LLMResponseModel(BaseModel):
    # Schema the LLM is prompted to return; used to validate its JSON reply.
    question_content: str
    answer_language: str = LANGUAGE  # defaults to "Odia"
    reasoning_content: str
    answer_content: str
33
def create_prompt(user_odia_question: str) -> str:
    """Build the reasoning-generation prompt for a single Odia question."""
    template = PROMPTS["odia_reasoning_generation_prompt"]
    return template.format(
        user_odia_question=user_odia_question,
        example_odia_question_json=example_odia_question_json,
        example_answer_json=example_odia_answer_json,
    )
42
# Functions
def chat_with_model(prompt: str) -> str:
    """Send the prompt to Gemini; return its text or an "Error: ..." string.

    Never raises: transport/auth/quota failures (and blocked responses, whose
    .text accessor raises) are folded into the returned string so the caller's
    parsing layer can detect them via the "Error:" prefix.
    """
    try:
        reply = model.generate_content(prompt)
        return reply.text if reply.text else "Error: Empty response"
    except Exception as exc:
        return f"Error: {str(exc)}"
49
+
50
def clean_json_text(text: str) -> str:
    """Strip markdown code fences and isolate the outermost JSON object.

    "Error: ..." strings from chat_with_model pass through untouched so the
    caller can still detect them.
    """
    if text.startswith("Error:"):
        return text

    # Remove markdown code blocks (```json ... ```)
    text = text.strip()
    if text.startswith("```"):
        fence_lines = text.split('\n')
        if len(fence_lines) > 2:
            # Drop the opening and closing fence lines.
            text = '\n'.join(fence_lines[1:-1])
        else:
            # Single-line fence: peel backticks and a leading "json" tag.
            text = text.strip("`").replace("json", "", 1).strip()

    # Slice from the first "{" to the last "}" to drop any chatter around
    # the JSON object.
    start = text.find("{")
    end = text.rfind("}")
    if start != -1 and end != -1:
        return text[start:end + 1]

    return text
70
+
71
def validate_output(raw_output: str, original_question: str):
    """Parse the model's raw reply into the response schema, with fallbacks.

    Never raises: every failure path returns a dict in the same shape as a
    success, plus an "error" key describing what went wrong.
    """
    cleaned = clean_json_text(raw_output)

    def _fallback(reasoning: str, answer: str, error: str):
        # Shared shape for every failure path.
        return {
            "question_content": original_question,
            "answer_language": LANGUAGE,
            "reasoning_content": reasoning,
            "answer_content": answer,
            "error": error,
        }

    if cleaned.startswith("Error:"):
        return _fallback(
            f"Error occurred: {cleaned}",
            "Unable to generate answer due to error",
            cleaned,
        )

    try:
        # Parse, then validate against the schema.
        validated = LLMResponseModel(**json.loads(cleaned))
        return validated.model_dump()
    except json.JSONDecodeError as je:
        return _fallback(
            f"JSON parsing failed: {str(je)}",
            "Unable to parse model response",
            f"JSON Error: {str(je)}",
        )
    except Exception as e:
        return _fallback(
            f"Validation failed: {str(e)}",
            "Unable to validate model response",
            f"Validation Error: {str(e)}",
        )
104
+
105
def run_pipeline(question: str):
    """End-to-end: build prompt → call model → validate. Never raises."""
    try:
        raw_output = chat_with_model(create_prompt(user_odia_question=question))
        return validate_output(raw_output, question)
    except Exception as exc:
        # Last-resort guard so the API layer always receives a dict.
        return {
            "question_content": question,
            "answer_language": LANGUAGE,
            "reasoning_content": f"Pipeline error: {str(exc)}",
            "answer_content": "Unable to process question",
            "error": f"Pipeline Error: {str(exc)}",
        }
119
+
120
# API
app = FastAPI(title="Odia Question Answering API", version="0.1.0")

@app.get("/")
async def root():
    # Cheap liveness probe / landing message (no model call).
    return {"message": "Odia Question Answering API is running", "status": "healthy"}
126
+
127
@app.get("/health")
async def health_check():
    """Deep health check.

    NOTE(review): performs a real Gemini call on every hit, which is slow and
    billable — avoid wiring this into a frequent orchestrator probe.
    """
    try:
        # Test model connectivity end-to-end, not just config presence.
        test_response = model.generate_content("Test")
        return {
            "status": "healthy",
            "model": os.getenv("LLM_MODEL", "gemini-pro"),
            "api_configured": bool(google_api_key)
        }
    except Exception as e:
        return {
            "status": "unhealthy",
            "error": str(e),
            "api_configured": bool(google_api_key)
        }
143
+
144
@app.post("/generate")
async def generate_answer(request: QuestionRequest):
    """Generate an Odia answer + step-by-step reasoning for one question.

    Returns {"success": True, "data": {...}} on success. Raises 400 for an
    empty question and 500 when the model call or the pipeline itself failed;
    parse/validation failures return the graceful fallback payload instead.
    """
    try:
        if not request.question.strip():
            raise HTTPException(status_code=400, detail="Question cannot be empty")

        result = run_pipeline(request.question.strip())

        # Only hard failures (model-call errors, pipeline crashes) become 500s.
        # BUG FIX: the old substring test ("Error: " in result["error"]) also
        # matched "JSON Error: ..." and "Validation Error: ...", so the
        # graceful fallback dicts built for those cases were never returned.
        if "error" in result and result["error"].startswith(("Error:", "Pipeline Error:")):
            raise HTTPException(status_code=500, detail=f"Processing failed: {result['error']}")

        return {"success": True, "data": result}

    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Unexpected error: {str(e)}")
162
+
163
if __name__ == "__main__":
    print("Starting Odia Question Answering API...")
    print(f"Google API Key configured: {'Yes' if google_api_key else 'No'}")
    print(f"Model: {os.getenv('LLM_MODEL', 'gemini-pro')}")
    # FIX: uvicorn's reload mode requires an import string, not an app object
    # (reload=True was silently ignored/warned with the instance). Port 8000
    # matches compose.yaml's answer_service and ANSWER_GENERATION_URL in .env;
    # the previous 9000 collided with the question service.
    uvicorn.run("answer_generation:app", host="127.0.0.1", port=8000, reload=True)
app.py ADDED
@@ -0,0 +1,177 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from controller import generate_questions # returns question_list (or a dict containing it)
3
+ from controller import generate_answer # returns a dict with question_content, answer_content, reasoning_content
4
+ from db import save_in_db # saves parsed dict into mongo
5
+
6
+ MAX_QUESTIONS = 20
7
+
8
+ # ---------------- Helpers ---------------- #
9
+ def _extract_question_list(result):
10
+ """Safely extract question list from a controller response"""
11
+ if not result:
12
+ return []
13
+ if isinstance(result, dict):
14
+ if "success" in result and isinstance(result.get("data"), dict):
15
+ return result["data"].get("question_list", [])
16
+ if "question_list" in result:
17
+ return result["question_list"] or []
18
+ if "data" in result and isinstance(result["data"], list):
19
+ return result["data"]
20
+ if isinstance(result, list):
21
+ return result
22
+ return []
23
+
24
+ def _extract_qa(result, original_q):
25
+ """Return a dict with question_content, answer_content, reasoning_content"""
26
+ default = {
27
+ "question_content": original_q,
28
+ "answer_content": "No answer returned",
29
+ "reasoning_content": ""
30
+ }
31
+ if not result:
32
+ return default
33
+ if isinstance(result, dict):
34
+ d = result.get("data") if result.get("success") and isinstance(result.get("data"), dict) else result
35
+ return {
36
+ "question_content": d.get("question_content", original_q),
37
+ "answer_content": d.get("answer_content", d.get("answer", "")) or "",
38
+ "reasoning_content": d.get("reasoning_content", d.get("reasoning", "")) or ""
39
+ }
40
+ return default
41
+
42
# ---------------- Handlers ---------------- #
def generate_questions_ui(topic: str, num_questions: int):
    """Stream Q → A → R one by one.

    Gradio streaming handler: each `yield` re-emits the FULL flat list of
    component updates. Every card owns 6 consecutive slots, in this order:
    [question, answer, reasoning, accept-btn, reject-btn, card-group] —
    the layout code that builds `output_components` must match this order.
    """
    result_values = []
    for _ in range(MAX_QUESTIONS):
        result_values.extend([
            "", "", "",  # Q, A, R
            gr.update(visible=False, interactive=False),  # accept
            gr.update(visible=False, interactive=False),  # reject
            gr.update(visible=False)  # group hidden
        ])
    # First yield resets/hides the whole board before generation starts.
    yield result_values

    try:
        qres = generate_questions(topic.strip(), int(num_questions))
    except Exception as e:
        # Surface the failure in the first card's question slot.
        result_values[0] = f"Error generating questions: {e}"
        yield result_values
        return

    question_list = _extract_question_list(qres)
    if not question_list:
        result_values[0] = "No questions returned."
        yield result_values
        return

    for i, q in enumerate(question_list[:MAX_QUESTIONS]):
        base = i * 6  # index of this card's first slot
        result_values[base + 0] = q
        result_values[base + 1] = "Generating answer..."
        result_values[base + 2] = "Generating reasoning..."
        result_values[base + 5] = gr.update(visible=True)
        # Show the card with placeholders before the (slow) answer call.
        yield result_values

        try:
            ans_res = generate_answer(q)
            qa = _extract_qa(ans_res, q)
            result_values[base + 1] = qa["answer_content"]
            result_values[base + 2] = qa["reasoning_content"]
            result_values[base + 3] = gr.update(visible=True, interactive=True)
            result_values[base + 4] = gr.update(visible=True, interactive=True)
        except Exception as e:
            # On failure only the reject button is offered for this card.
            result_values[base + 1] = f"Error: {e}"
            result_values[base + 2] = ""
            result_values[base + 4] = gr.update(visible=True, interactive=True)

        yield result_values

    yield result_values
91
+
92
def accept_question(question, answer, reasoning):
    """Persist an accepted Q/A/R triple to MongoDB and hide its card."""
    record = {
        "question_content": question,
        "answer_language": "Odia",
        "reasoning_content": reasoning,
        "answer_content": answer,
    }
    try:
        save_in_db(record)
        hide = gr.update(visible=False)
        # Saved: hide accept, reject, and the whole card group.
        return (hide, gr.update(visible=False), gr.update(visible=False))
    except Exception as e:
        # Save failed: keep the card visible and surface the error on the
        # (now disabled) accept button.
        return (
            gr.update(interactive=False, value=f"Error: {e}"),
            gr.update(visible=True),
            gr.update(visible=True)
        )
113
+
114
def reject_card():
    """Hide a rejected card without persisting anything."""
    return gr.update(visible=False)
117
+
118
# ---------------- UI Layout ---------------- #
# Dark-theme CSS overrides applied on top of the base Gradio theme.
custom_css = """
.gradio-container { background-color: #121212 !important; color: #E0E0E0 !important; }
.question-card {
    border: 1px solid #333;
    box-shadow: 0 4px 12px rgba(0,0,0,0.4);
    border-radius: 12px;
    padding: 20px !important;
    margin-bottom: 20px !important;
    background-color: #1E1E1E;
    transition: 0.3s ease-in-out;
}
.question-card:hover {
    transform: translateY(-3px);
    box-shadow: 0 6px 16px rgba(0,0,0,0.6);
}
textarea { background-color: #2A2A2A !important; color: #E0E0E0 !important; border: 1px solid #444 !important; border-radius: 8px !important; }
button { border-radius: 8px !important; padding: 8px 12px !important; }
"""

with gr.Blocks(theme=gr.themes.Base(), css=custom_css) as demo:
    gr.Markdown("<h2 style='color:#90CAF9;'>🌙 Odia Q&A — Generate → Answer (streaming)</h2>")

    with gr.Row():
        topic_input = gr.Textbox(label="📝 Topic", placeholder="Enter a topic, e.g., 'Photosynthesis'")
        num_questions_input = gr.Dropdown(label="🔢 Number of Questions", choices=[5, 10, 15, 20], value=5)
        generate_btn = gr.Button("⚡ Generate", variant="primary")

    # Pre-build MAX_QUESTIONS hidden cards; the streaming handler fills and
    # reveals them. Slot order in output_components must match the 6-per-card
    # layout assumed by generate_questions_ui.
    output_components = []
    for i in range(MAX_QUESTIONS):
        with gr.Group(visible=False, elem_classes=["question-card"]) as output_group:
            with gr.Row():
                with gr.Column(scale=4):
                    q_text = gr.Textbox(label="❓ Question", interactive=False)
                    a_text = gr.Textbox(label="✅ Answer", interactive=False)
                    r_text = gr.Textbox(label="🧠 Reasoning", interactive=False)
                with gr.Column(scale=1, min_width=150):
                    accept_btn = gr.Button("Accept", variant="primary")
                    reject_btn = gr.Button("Reject", variant="stop")

        # Bind buttons
        accept_btn.click(
            fn=accept_question,
            inputs=[q_text, a_text, r_text],  # only inputs
            outputs=[accept_btn, reject_btn, output_group]  # update group visibility
        )
        reject_btn.click(fn=reject_card, outputs=[output_group])

        output_components.extend([q_text, a_text, r_text, accept_btn, reject_btn, output_group])

    generate_btn.click(
        fn=generate_questions_ui,
        inputs=[topic_input, num_questions_input],
        outputs=output_components
    )

# Queueing is required for generator (streaming) event handlers.
demo.queue()

if __name__ == "__main__":
    demo.launch(server_name="0.0.0.0", server_port=7860)
compose.yaml ADDED
@@ -0,0 +1,48 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# This file is for local development.
# It uses "build: ." to create an image from your local source code.
# Port contract: answer_service → 8000, question_service → 9000. These must
# stay in sync with the URLs in .env and the defaults in controller.py.

services:
  # Service 1: The Answer Generation FastAPI app
  answer_service:
    build: .
    container_name: answer_service
    command: ["uvicorn", "answer_generation:app", "--host", "0.0.0.0", "--port", "8000"]
    ports:
      - "8000:8000"
    env_file:
      - .env
    volumes:
      - .:/app   # mount source for live edits during development
    environment:
      - PYTHONPATH=/app

  # Service 2: The Question Generation FastAPI app
  question_service:
    build: .
    container_name: question_service
    command: ["uvicorn", "question_generation:app", "--host", "0.0.0.0", "--port", "9000"]
    ports:
      - "9000:9000"
    env_file:
      - .env
    volumes:
      - .:/app
    environment:
      - PYTHONPATH=/app

  # Service 3: The Gradio UI app
  gradio_app:
    build: .
    container_name: gradio_app
    command: ["python", "app.py"]
    ports:
      - "7860:7860"
    env_file:
      - .env
    volumes:
      - .:/app
    depends_on:
      - answer_service
      - question_service
    environment:
      - PYTHONPATH=/app
controller.py ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import requests
import os

# Service endpoints. Defaults match compose.yaml and .env (question_service on
# 9000, answer_service on 8000). FIX: the previous hard-coded values had the
# two ports swapped relative to compose.yaml and ignored the
# QUESTION_GENERATION_URL / ANSWER_GENERATION_URL variables defined in .env,
# so the Gradio container could never reach the services.
QUESTION_GENERATION_URL = os.getenv("QUESTION_GENERATION_URL", "http://127.0.0.1:9000")
ANSWER_GENERATION_URL = os.getenv("ANSWER_GENERATION_URL", "http://127.0.0.1:8000")

# LLM calls are slow; bound them so the UI handler cannot hang forever.
REQUEST_TIMEOUT_SECONDS = 300

def generate_questions(topic: str, num_questions: int = 10):
    """POST the topic to the question service and return its JSON payload.

    On any transport/HTTP failure returns {"error": "<message>"} instead of
    raising, so the UI layer can render the problem.
    """
    url = f"{QUESTION_GENERATION_URL}/generate-questions"
    payload = {
        "topic": topic,
        "num_questions": num_questions
    }
    try:
        response = requests.post(url, json=payload, timeout=REQUEST_TIMEOUT_SECONDS)
        response.raise_for_status()  # Raise error if status != 2xx
        return response.json()
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}

def generate_answer(question: str):
    """POST one question to the answer service and return its JSON payload."""
    url = f"{ANSWER_GENERATION_URL}/generate"
    payload = {"question": question}

    try:
        response = requests.post(url, json=payload, timeout=REQUEST_TIMEOUT_SECONDS)
        response.raise_for_status()
        return response.json()
    except requests.exceptions.RequestException as e:
        return {"error": str(e)}
db.py ADDED
@@ -0,0 +1,80 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from pymongo.mongo_client import MongoClient
from pymongo.server_api import ServerApi
import datetime
import os
import uuid
from dotenv import load_dotenv

# Load environment variables (MONGO_URL)
load_dotenv()
uri = os.getenv("MONGO_URL")

# Create Mongo client (connection is lazy; the ping below verifies it)
client = MongoClient(uri, server_api=ServerApi('1'))

# Ping test — deliberately non-fatal so the UI can still start without a DB
try:
    client.admin.command('ping')
    print("✅ Connected to MongoDB!")
except Exception as e:
    print("❌ MongoDB connection failed:", e)

# Database & Collection
db = client["ReasoningData"]
collection = db["formatted_data"]

# _ids inserted during this process's lifetime
global_question_list = []


def generate_unique_id():
    """
    Generate a unique ID for each question.

    Format: ODR_<YYYYmmddHHMMSS>_<6 hex chars>, e.g. ODR_20250822123456_a1b2c3.
    FIX: a random uuid4 suffix replaces the old per-process counter, which
    could collide when several service processes (compose runs three) inserted
    within the same second, or after a restart reset the counter.
    """
    prefix = "ODR"
    timestamp = datetime.datetime.now().strftime("%Y%m%d%H%M%S")
    return f"{prefix}_{timestamp}_{uuid.uuid4().hex[:6]}"


def convert_into_mongo_document(parsed_json):
    """
    Convert a parsed Pydantic model (or plain dict) into a Mongo document.
    """
    if hasattr(parsed_json, "dict"):  # Pydantic model → plain dict
        parsed_json = parsed_json.dict()

    return {
        "question_id": generate_unique_id(),
        "question_content": parsed_json.get("question_content"),
        "answer_language": parsed_json.get("answer_language"),
        "reasoning_content": parsed_json.get("reasoning_content"),
        "answer_content": parsed_json.get("answer_content"),
    }


def insert_into_mongo(data):
    """
    Insert one document into MongoDB; errors are logged, not raised.
    """
    try:
        data["_id"] = data["question_id"]  # use our ID as the primary key
        result = collection.insert_one(data)
        global_question_list.append(result.inserted_id)
        print("✅ Inserted document ID:", result.inserted_id)
    except Exception as e:
        print("❌ Error inserting document:", e)


def save_in_db(parsed_json):
    """
    Full pipeline: convert → insert.
    """
    insert_into_mongo(convert_into_mongo_document(parsed_json))
+
dockerfile ADDED
@@ -0,0 +1,15 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
FROM python:3.10-slim

WORKDIR /app

# Install dependencies first so source edits don't bust the pip cache layer.
COPY requirements.txt .

RUN pip install --no-cache-dir -r requirements.txt

COPY . .

# compose.yaml overrides the command per service; this default lets the image
# do something useful when run standalone (the answer service on 8000).
EXPOSE 8000
CMD ["uvicorn", "answer_generation:app", "--host", "0.0.0.0", "--port", "8000"]
llm_pipeline.py ADDED
@@ -0,0 +1,13 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Example payloads interpolated into the prompt templates (see prompts.py) to
# show the model the exact JSON schema it must emit.

# Example of an incoming question wrapped in the required schema.
example_odia_question_json = {
    "question_content": "ଏକ ଦୃଷ୍ଟାନ୍ତ ପ୍ରଶ୍ନ",
    "answer_language": "Odia",
    "reasoning_content": "ଏଠାରେ ସଠିକ୍ ତରୀକାରେ ଧାରଣା ବିବେଚନା କରନ୍ତୁ",
    "answer_content": "ଫଳାଫଳ"
}

# Example of a completed answer in the same schema.
example_odia_answer_json = {
    "question_content": "ଏକ ଦୃଷ୍ଟାନ୍ତ ପ୍ରଶ୍ନ",
    "answer_language": "Odia",
    "reasoning_content": "ଏଠାରେ ଧାରଣା ବିବେଚନା",
    "answer_content": "ଫଳାଫଳ"
}
prompts.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# prompts.py
# Prompt templates filled in with str.format(). Double braces {{ }} are
# escaped literal braces for the JSON examples shown to the model.

PROMPTS = {
    # Used by question_generation.py: produce ONLY questions, as JSON.
    # Placeholders: {language}, {topic}, {num_questions}.
    # NOTE(review): the template both demands "advanced level" questions and
    # asks for a mix of easy/medium/hard — these instructions conflict and
    # should be reconciled.
    "questions_only": """
You are a meticulous question-setter who writes clear, solvable, but advance level reasoning-based mathematics questions in {language}.
The topic is: "{topic}".

The question should be of advanced level and should be of under graduate standard level, and should include multiple subtopics of the main topic to solve the question.

The question should be of 10 marks and at least take 10 minutes duration to solve.

It should force the user to think and solve the question by multiple steps.

The question should be conclusive , it should not have multiple final answers , although the approach might be multiple.

### Requirements:
- Generate exactly {num_questions} unique questions (±1 if absolutely necessary).
- Language: {language} only (do not use English unless part of notation like symbols, variables, or numbers).
- Each question must be reasoning-based and solvable with pen-and-paper.
- Do NOT provide answers, solutions, hints, or commentary.
- Keep the questions self-contained and unambiguous.
- Use realistic and moderate numbers (avoid very large, complex, or impractical values).
- Ensure variety: mix easy, medium, and at most 2 hard-level questions.
- Stay strictly within the topic.

### Output Format:
Return ONLY valid JSON, following this schema exactly:

{{
  "question_language": "{language}",
  "question_list": [
    "<Q1 in {language}>",
    "<Q2 in {language}>",
    "<Q3 in {language}>",
    ...
  ]
}}

### Rules:
- Output must be valid JSON (no markdown formatting, no ``` fences).
- Do NOT include extra keys or metadata.
- Do NOT repeat questions.
- Ensure the length of "question_list" is approximately {num_questions}.
""",

    # Used by answer_generation.py: answer one question with reasoning.
    # Placeholders: {example_odia_question_json}, {example_answer_json},
    # {user_odia_question}.
    "odia_reasoning_generation_prompt": """
You are an intelligent Odia language reasoning teacher, highly experienced in teaching mental reasoning questions.
You are known for always giving correct answers and explaining the step-by-step reasoning clearly in Odia.
You will be provided with a query in JSON format like this: {example_odia_question_json} (use this only as an example).

⚠️ IMPORTANT RULES:
- Output must be ONLY valid JSON (no markdown, no text outside JSON).
- Use the exact keys: question_content, answer_language, reasoning_content, answer_content.
- The key "answer_content" MUST contain the final concise answer in Odia; do NOT leave it empty.
- All fields must be filled.
- Always answer in Odia language.
- Do not invent new keys, do not add comments.
- reasoning_content must show proper step-by-step process, leading from the question to the final answer.

The JSON structure you must return is:
{{
  "question_content": "string (the question in Odia)",
  "answer_language": "Odia",
  "reasoning_content": "string (detailed reasoning in Odia, step by step)",
  "answer_content": "string (final concise answer in Odia)"
}}

Use this answer as an example: {example_answer_json}

Now, process the following query (already wrapped in JSON) and return the result strictly in the required JSON structure:

{{
  "question_content": "{user_odia_question}",
  "answer_language": "Odia",
  "reasoning_content": "",
  "answer_content": ""
}}
"""
}
question_generation.py ADDED
@@ -0,0 +1,98 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os
import json
from dotenv import load_dotenv
from pydantic import BaseModel, ValidationError
from typing import List
from prompts import PROMPTS
import google.generativeai as genai
from fastapi import FastAPI, HTTPException
import uvicorn

# Setup: pull GOOGLE_API_KEY / LLM_MODEL from .env into the process env.
load_dotenv()
google_api_key = os.getenv("GOOGLE_API_KEY")
# FIX: fail fast when the key is missing (answer_generation.py already does
# this) instead of letting the first model call die with an opaque auth error.
if not google_api_key:
    raise ValueError("GOOGLE_API_KEY not found in environment variables")
genai.configure(api_key=google_api_key)
# Model name is configurable via LLM_MODEL; "gemini-pro" is the fallback.
model = genai.GenerativeModel(os.getenv("LLM_MODEL", "gemini-pro"))
16
+
17
# Models
class TopicRequest(BaseModel):
    # Request body for POST /generate-questions.
    topic: str
    num_questions: int = 10  # range-checked (1..50) in the endpoint

class GeneratedQuestionModel(BaseModel):
    # Schema the LLM must return for question generation.
    question_language: str
    question_list: List[str]
25
+
26
# Functions
def chat_with_model(prompt: str) -> str:
    """Call Gemini with the prompt; return its text or an "Error: ..." string.

    Never raises: failures (including blocked responses, whose .text accessor
    raises) are folded into the returned string for the parsing layer.
    """
    try:
        reply = model.generate_content(prompt)
        return reply.text if reply.text else "Error: Empty response"
    except Exception as exc:
        return f"Error: {exc}"
33
+
34
def clean_json_text(text: str) -> str:
    """Strip markdown code fences and isolate the outermost JSON object.

    "Error: ..." strings from chat_with_model pass through untouched.
    """
    if text.startswith("Error:"):
        return text
    # FIX: strip surrounding whitespace first (answer_generation.py's copy of
    # this helper already does) so a reply like "  ```json\n..." still has its
    # fence detected and removed.
    text = text.strip()
    if text.startswith("```"):
        lines = text.split('\n')
        text = '\n'.join(lines[1:-1]) if len(lines) > 2 else text.strip("`").replace("json", "", 1).strip()
    # Slice from the first "{" to the last "}" to drop chatter around the JSON.
    first, last = text.find("{"), text.rfind("}")
    return text[first:last+1] if first != -1 and last != -1 else text
42
+
43
def validate_answer(raw_output: str):
    """Parse and validate the model's question-list JSON; never raises.

    Returns the validated dict, or on any failure a dict with an "error" key
    and an empty question_list.
    """
    cleaned = clean_json_text(raw_output)
    if cleaned.startswith("Error:"):
        return {"error": cleaned, "question_language": "Odia", "question_list": []}
    try:
        return GeneratedQuestionModel.model_validate_json(cleaned).model_dump()
    except ValidationError:
        # model_validate_json is strict about the JSON; fall back to a plain
        # json.loads pass before giving up.
        try:
            return GeneratedQuestionModel(**json.loads(cleaned)).model_dump()
        # FIX: the bare `except:` here also swallowed SystemExit and
        # KeyboardInterrupt; Exception covers every parse/validation failure.
        except Exception:
            return {"error": "Invalid JSON", "question_language": "Odia", "question_list": []}
54
+
55
def final_pipeline(user_input: str, num_questions: int = 10):
    """Build the questions-only prompt, query the model, validate the result."""
    prompt = PROMPTS["questions_only"].format(
        language="Odia",
        topic=user_input,
        num_questions=num_questions,
    )
    raw_reply = chat_with_model(prompt)
    return validate_answer(raw_reply)
58
+
59
# API
app = FastAPI()

@app.get("/health")
async def health_check():
    """Deep health check.

    NOTE(review): performs a real (billable) Gemini call on every hit — avoid
    wiring this into a frequent orchestrator probe.
    """
    try:
        # Test model connectivity end-to-end, not just config presence.
        test_response = model.generate_content("Test")
        return {
            "status": "healthy",
            "model": os.getenv("LLM_MODEL", "gemini-pro"),
            "api_configured": bool(google_api_key)
        }
    except Exception as e:
        return {
            "status": "unhealthy",
            "error": str(e),
            "api_configured": bool(google_api_key)
        }

@app.get("/")
async def root():
    # Cheap liveness probe / landing message (no model call).
    return {"message": "Odia Question Generating API is running", "status": "healthy"}
81
+
82
+
83
@app.post("/generate-questions")
async def generate_questions(request: TopicRequest):
    """Generate `num_questions` Odia questions on a topic.

    Raises 400 for bad input and 500 when generation failed. FIX: previously
    only errors containing "Error:" triggered a 500, so an "Invalid JSON"
    failure from validate_answer returned success:True with an empty
    question_list, silently hiding the failure from the UI.
    """
    if not request.topic.strip():
        raise HTTPException(status_code=400, detail="Topic cannot be empty")
    if not 1 <= request.num_questions <= 50:
        raise HTTPException(status_code=400, detail="Questions must be between 1-50")

    result = final_pipeline(request.topic.strip(), request.num_questions)

    # Any error from the pipeline means there are no usable questions.
    if result.get("error"):
        raise HTTPException(status_code=500, detail=result["error"])

    return {"success": True, "data": result}
96
+
97
if __name__ == "__main__":
    # FIX: port 9000 matches compose.yaml's question_service and the
    # QUESTION_GENERATION_URL in .env; the previous value (8000) collided
    # with the answer service's port.
    uvicorn.run(app, host="127.0.0.1", port=9000)
requirements.txt ADDED
@@ -0,0 +1,21 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # FastAPI and Server
2
+ fastapi==0.115.9
3
+ uvicorn==0.31.0
4
+
5
+ # Google Generative AI
6
+ google-generativeai==0.8.5
7
+
8
+ # Gradio UI
9
+ gradio==5.31.0
10
+
11
+ # Database
12
pymongo[srv]==4.11.3  # [srv] pulls in dnspython, required for the mongodb+srv:// URI in .env
13
+
14
+ # Data Validation
15
+ pydantic==2.11.5
16
+
17
+ # HTTP Requests
18
+ requests==2.32.3
19
+
20
+ # Environment Variables
21
+ python-dotenv==1.0.1