Update app.py

app.py CHANGED
@@ -179,6 +179,230 @@
 
 
 
+# import gradio as gr
+# import fitz # PyMuPDF
+# import torch
+# import os
+
+# # --- LANGCHAIN & RAG IMPORTS ---
+# from langchain_text_splitters import RecursiveCharacterTextSplitter
+# from langchain_community.vectorstores import FAISS
+# from langchain_core.embeddings import Embeddings
+
+# # --- ONNX & MODEL IMPORTS ---
+# from transformers import AutoTokenizer
+# from optimum.onnxruntime import ORTModelForFeatureExtraction, ORTModelForCausalLM
+# from huggingface_hub import snapshot_download
+
+# # ---------------------------------------------------------
+# # 1. Custom ONNX Embedding Class (BGE-Large)
+# # ---------------------------------------------------------
+# class OnnxBgeEmbeddings(Embeddings):
+#     def __init__(self, model_name="BAAI/bge-large-en-v1.5"):
+#         print(f"🔄 Loading Embeddings: {model_name}...")
+#         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
+#         # Note: export=True will re-convert on every restart.
+#         # For production, you'd want to save this permanently, but this works for now.
+#         self.model = ORTModelForFeatureExtraction.from_pretrained(model_name, export=True)
+
+#     def _process_batch(self, texts):
+#         inputs = self.tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")
+#         with torch.no_grad():
+#             outputs = self.model(**inputs)
+#         # CLS pooling for BGE
+#         embeddings = outputs.last_hidden_state[:, 0]
+#         embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
+#         return embeddings.numpy().tolist()
+
+#     def embed_documents(self, texts):
+#         return self._process_batch(texts)
+
+#     def embed_query(self, text):
+#         return self._process_batch(["Represent this sentence for searching relevant passages: " + text])[0]
+
+# # ---------------------------------------------------------
+# # 2. LLM Evaluator Class (Llama-3.2-1B ONNX)
+# # ---------------------------------------------------------
+
+# class LLMEvaluator:
+#     def __init__(self):
+#         self.repo_id = "onnx-community/Llama-3.2-1B-Instruct"
+#         self.local_dir = "onnx_llama_local"
+
+#         print(f"🔄 Preparing LLM: {self.repo_id}...")
+
+#         # [FIXED DOWNLOADER]
+#         print(f"📥 Downloading FP16 model + data to {self.local_dir}...")
+#         snapshot_download(
+#             repo_id=self.repo_id,
+#             local_dir=self.local_dir,
+#             local_dir_use_symlinks=False,
+#             allow_patterns=[
+#                 "config.json",
+#                 "generation_config.json",
+#                 "tokenizer*",
+#                 "special_tokens_map.json",
+#                 "*.jinja",
+#                 "onnx/model_fp16.onnx*" # WILDCARD '*' ensures we get .onnx AND .onnx_data
+#             ]
+#         )
+#         print("✅ Download complete.")
+
+#         self.tokenizer = AutoTokenizer.from_pretrained(self.local_dir)
+
+#         # [CRITICAL FIX]
+#         # Separating 'subfolder' and 'file_name' is required by Optimum
+#         self.model = ORTModelForCausalLM.from_pretrained(
+#             self.local_dir,
+#             subfolder="onnx", # Point to the subfolder
+#             file_name="model_fp16.onnx", # Just the filename
+#             use_cache=True,
+#             use_io_binding=False
+#         )
+
+#     def evaluate(self, context, question, student_answer):
+#         # Prompt Engineering for Llama 3
+#         messages = [
+#             {"role": "system", "content": "You are a helpful academic grader. Grade the student answer based ONLY on the provided context."},
+#             {"role": "user", "content": f"""
+# ### CONTEXT:
+# {context}
+
+# ### QUESTION:
+# {question}
+
+# ### STUDENT ANSWER:
+# {student_answer}
+
+# ### INSTRUCTIONS:
+# 1. Is the answer correct?
+# 2. Score out of 10.
+# 3. Explanation.
+# """}
+#         ]
+
+#         # Format input using the chat template
+#         input_text = self.tokenizer.apply_chat_template(
+#             messages,
+#             tokenize=False,
+#             add_generation_prompt=True
+#         )
+
+#         inputs = self.tokenizer(input_text, return_tensors="pt")
+
+#         # Generate response
+#         with torch.no_grad():
+#             outputs = self.model.generate(
+#                 **inputs,
+#                 max_new_tokens=256,
+#                 temperature=0.3,
+#                 do_sample=True,
+#                 top_p=0.9
+#             )
+
+#         # Decode response
+#         response = self.tokenizer.decode(
+#             outputs[0][inputs.input_ids.shape[1]:],
+#             skip_special_tokens=True
+#         )
+#         return response
+
+# # ---------------------------------------------------------
+# # 3. Main Application Logic
+# # ---------------------------------------------------------
+# class VectorSystem:
+#     def __init__(self):
+#         self.vector_store = None
+#         self.embeddings = OnnxBgeEmbeddings()
+#         self.llm = LLMEvaluator() # Initialize LLM
+#         self.all_chunks = []
+
+#     def process_file(self, file_obj):
+#         if file_obj is None: return "No file uploaded."
+#         try:
+#             text = ""
+#             if file_obj.name.endswith('.pdf'):
+#                 doc = fitz.open(file_obj.name)
+#                 for page in doc: text += page.get_text()
+#             elif file_obj.name.endswith('.txt'):
+#                 with open(file_obj.name, 'r', encoding='utf-8') as f: text = f.read()
+#             else:
+#                 return "❌ Error: Only .pdf and .txt supported."
+
+#             text_splitter = RecursiveCharacterTextSplitter(chunk_size=800, chunk_overlap=150)
+#             self.all_chunks = text_splitter.split_text(text)
+
+#             if not self.all_chunks: return "File empty."
+
+#             metadatas = [{"id": i} for i in range(len(self.all_chunks))]
+#             self.vector_store = FAISS.from_texts(self.all_chunks, self.embeddings, metadatas=metadatas)
+#             return f"✅ Indexed {len(self.all_chunks)} chunks."
+#         except Exception as e:
+#             return f"Error: {str(e)}"
+
+#     def process_query(self, question, student_answer):
+#         if not self.vector_store: return "⚠️ Please upload a file first.", ""
+#         if not question: return "⚠️ Enter a question.", ""
+
+#         # 1. Retrieve
+#         results = self.vector_store.similarity_search_with_score(question, k=3)
+
+#         # Prepare context for LLM
+#         context_text = "\n\n".join([doc.page_content for doc, _ in results])
+
+#         # Prepare Evidence Output for UI
+#         evidence_display = "### 📚 Retrieved Context:\n"
+#         for i, (doc, score) in enumerate(results):
+#             evidence_display += f"**Chunk {i+1}** (Score: {score:.4f}):\n> {doc.page_content}\n\n"
+
+#         # 2. Evaluate (if answer provided)
+#         llm_feedback = "Please enter a student answer to grade."
+#         if student_answer:
+#             llm_feedback = self.llm.evaluate(context_text, question, student_answer)
+
+#         return evidence_display, llm_feedback
+
+# # Initialize
+# system = VectorSystem()
+
+# # --- GRADIO UI ---
+# with gr.Blocks(title="EduGenius AI Grader") as demo:
+#     gr.Markdown("# 🧠 EduGenius: RAG + LLM Grading")
+#     gr.Markdown("Powered by **BGE-Large** (Retrieval) and **Llama-3.2-1B** (Evaluation) - All ONNX Optimized.")
+
+#     with gr.Row():
+#         with gr.Column(scale=1):
+#             pdf_input = gr.File(label="1. Upload Chapter (PDF/TXT)")
+#             upload_btn = gr.Button("Index Content", variant="primary")
+#             status_msg = gr.Textbox(label="System Status", interactive=False)
+
+#         with gr.Column(scale=2):
+#             q_input = gr.Textbox(label="2. Question")
+#             a_input = gr.Textbox(label="3. Student Answer")
+#             run_btn = gr.Button("Retrieve & Grade", variant="secondary")
+
+#     with gr.Row():
+#         evidence_box = gr.Markdown(label="Context")
+#         grade_box = gr.Markdown(label="LLM Evaluation")
+
+#     upload_btn.click(system.process_file, inputs=[pdf_input], outputs=[status_msg])
+#     run_btn.click(system.process_query, inputs=[q_input, a_input], outputs=[evidence_box, grade_box])
+
+# if __name__ == "__main__":
+#     demo.launch()
+
+
+
+
+
+
+
+
+
+
+
+
+
 import gradio as gr
 import fitz # PyMuPDF
 import torch
@@ -201,15 +425,12 @@ class OnnxBgeEmbeddings(Embeddings):
     def __init__(self, model_name="BAAI/bge-large-en-v1.5"):
         print(f"🔄 Loading Embeddings: {model_name}...")
         self.tokenizer = AutoTokenizer.from_pretrained(model_name)
-        # Note: export=True will re-convert on every restart.
-        # For production, you'd want to save this permanently, but this works for now.
         self.model = ORTModelForFeatureExtraction.from_pretrained(model_name, export=True)
 
     def _process_batch(self, texts):
         inputs = self.tokenizer(texts, padding=True, truncation=True, max_length=512, return_tensors="pt")
         with torch.no_grad():
            outputs = self.model(**inputs)
-        # CLS pooling for BGE
         embeddings = outputs.last_hidden_state[:, 0]
         embeddings = torch.nn.functional.normalize(embeddings, p=2, dim=1)
         return embeddings.numpy().tolist()
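Note on the two comments deleted above: the cost they describe is real, since export=True re-runs the PyTorch-to-ONNX conversion on every Space restart. A minimal sketch of the caching they hint at, assuming a scratch directory bge_onnx_local (the directory name and the load_bge helper are illustrative, not part of this commit):

import os
from transformers import AutoTokenizer
from optimum.onnxruntime import ORTModelForFeatureExtraction

EXPORT_DIR = "bge_onnx_local"  # hypothetical cache location

def load_bge(model_name="BAAI/bge-large-en-v1.5"):
    if os.path.isdir(EXPORT_DIR):
        # Fast path: reload the ONNX graph exported on a previous run.
        return (ORTModelForFeatureExtraction.from_pretrained(EXPORT_DIR),
                AutoTokenizer.from_pretrained(EXPORT_DIR))
    # First run: export once, then persist model + tokenizer together.
    model = ORTModelForFeatureExtraction.from_pretrained(model_name, export=True)
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model.save_pretrained(EXPORT_DIR)
    tokenizer.save_pretrained(EXPORT_DIR)
    return model, tokenizer

On a Space with persistent storage this turns every restart after the first into a plain model load.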
@@ -221,18 +442,16 @@ class OnnxBgeEmbeddings(Embeddings):
         return self._process_batch(["Represent this sentence for searching relevant passages: " + text])[0]
 
 # ---------------------------------------------------------
-# 2. LLM Evaluator Class (Llama-3.2-1B ONNX)
+# 2. LLM Evaluator Class (Llama-3.2-1B ONNX - INT8 QUANTIZED)
 # ---------------------------------------------------------
-
 class LLMEvaluator:
     def __init__(self):
         self.repo_id = "onnx-community/Llama-3.2-1B-Instruct"
         self.local_dir = "onnx_llama_local"
 
-        print(f"🔄 Preparing LLM: {self.repo_id}...")
+        print(f"🔄 Preparing LLM: {self.repo_id} (Int8 Quantized)...")
 
-        # [FIXED DOWNLOADER]
-        print(f"📥 Downloading FP16 model + data to {self.local_dir}...")
+        print(f"📥 Downloading Int8 model to {self.local_dir}...")
         snapshot_download(
             repo_id=self.repo_id,
             local_dir=self.local_dir,
@@ -243,45 +462,57 @@ class LLMEvaluator:
                 "tokenizer*",
                 "special_tokens_map.json",
                 "*.jinja",
-                "onnx/model_fp16.onnx*" # WILDCARD '*' ensures we get .onnx AND .onnx_data
+                "onnx/model_quantized.onnx"
             ]
         )
         print("✅ Download complete.")
 
         self.tokenizer = AutoTokenizer.from_pretrained(self.local_dir)
 
-        # [CRITICAL FIX]
-        # Separating 'subfolder' and 'file_name' is required by Optimum
         self.model = ORTModelForCausalLM.from_pretrained(
             self.local_dir,
-            subfolder="onnx", # Point to the subfolder
-            file_name="model_fp16.onnx", # Just the filename
+            subfolder="onnx",
+            file_name="model_quantized.onnx",
             use_cache=True,
             use_io_binding=False
         )
 
     def evaluate(self, context, question, student_answer):
-        # Prompt Engineering for Llama 3
+        # STRICT PROMPT (As requested)
+        system_prompt = """You are a strict academic grader.
+
+RULES:
+1. BASE YOUR SCORE ONLY ON THE CONTEXT PROVIDED.
+2. If the student answer contradicts the context, give a score of 0-2.
+3. If the context says 'A is B', and the student says 'A is C', the student is WRONG.
+4. Be harsh. Do not give credit for vague or hallucinatory answers."""
+
+        user_prompt = f"""
+### SOURCE MATERIAL (CONTEXT):
+{context}
+
+### EXAM QUESTION:
+{question}
+
+### STUDENT ANSWER:
+{student_answer}
+
+### INSTRUCTIONS:
+Compare the Student Answer to the Source Material.
+- Does the student explicitly mention the key points found in the text?
+- If the student describes something NOT in the text (e.g., "looking in" vs "looking out"), mark it wrong.
+
+OUTPUT FORMAT:
+Score: [0-10]
+Verdict: [Correct/Incorrect/Partially Correct]
+Explanation: [1-2 sentences explaining why, citing the text]
+"""
+
         messages = [
-            {"role": "system", "content": "You are a helpful academic grader. Grade the student answer based ONLY on the provided context."},
-            {"role": "user", "content": f"""
-### CONTEXT:
-{context}
-
-### QUESTION:
-{question}
-
-### STUDENT ANSWER:
-{student_answer}
-
-### INSTRUCTIONS:
-1. Is the answer correct?
-2. Score out of 10.
-3. Explanation.
-"""}
+            {"role": "system", "content": system_prompt},
+            {"role": "user", "content": user_prompt}
         ]
 
-        # Format input using the chat template
         input_text = self.tokenizer.apply_chat_template(
             messages,
            tokenize=False,
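Note on the allow_patterns change above: snapshot_download patterns use shell-style matching, so the old trailing '*' in "onnx/model_fp16.onnx*" was what pulled in the external-weights file alongside the graph; the new "onnx/model_quantized.onnx" pattern assumes the int8 export is a single self-contained file. A quick sanity check of the matching behaviour with fnmatch (the candidate file names are illustrative):

from fnmatch import fnmatch

# Hypothetical repo listing; only the onnx entries matter here.
candidates = [
    "onnx/model_fp16.onnx",
    "onnx/model_fp16.onnx_data",
    "onnx/model_quantized.onnx",
]

for pattern in ["onnx/model_fp16.onnx*", "onnx/model_quantized.onnx"]:
    print(pattern, "->", [f for f in candidates if fnmatch(f, pattern)])

# onnx/model_fp16.onnx*     -> ['onnx/model_fp16.onnx', 'onnx/model_fp16.onnx_data']
# onnx/model_quantized.onnx -> ['onnx/model_quantized.onnx']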
@@ -290,23 +521,21 @@ class LLMEvaluator:
 
         inputs = self.tokenizer(input_text, return_tensors="pt")
 
-        # Generate response
         with torch.no_grad():
             outputs = self.model.generate(
                 **inputs,
-                max_new_tokens=256,
-                temperature=0.3,
+                max_new_tokens=200,
+                temperature=0.1,
                 do_sample=True,
                 top_p=0.9
             )
 
-        # Decode response
         response = self.tokenizer.decode(
             outputs[0][inputs.input_ids.shape[1]:],
             skip_special_tokens=True
         )
         return response
-
+
 # ---------------------------------------------------------
 # 3. Main Application Logic
 # ---------------------------------------------------------
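Side note on the decoding change above: temperature 0.1 with do_sample=True is nearly greedy but still nondeterministic, so the same student answer can receive different grades across runs. If repeatable output matters, a greedy variant of the decode step could look like this (a sketch; grade_deterministically is a hypothetical helper, not in the commit):

import torch

def grade_deterministically(model, tokenizer, input_text, max_new_tokens=200):
    # Greedy decoding: the same prompt always yields the same grade text.
    inputs = tokenizer(input_text, return_tensors="pt")
    with torch.no_grad():
        outputs = model.generate(
            **inputs,
            max_new_tokens=max_new_tokens,
            do_sample=False,  # sampling off: temperature/top_p no longer apply
        )
    return tokenizer.decode(outputs[0][inputs.input_ids.shape[1]:],
                            skip_special_tokens=True)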
@@ -314,7 +543,7 @@ class VectorSystem:
     def __init__(self):
         self.vector_store = None
         self.embeddings = OnnxBgeEmbeddings()
-        self.llm = LLMEvaluator() # Initialize LLM
+        self.llm = LLMEvaluator()
         self.all_chunks = []
 
     def process_file(self, file_obj):
@@ -334,6 +563,7 @@ class VectorSystem:
 
             if not self.all_chunks: return "File empty."
 
+            # We store the ID to look up neighbors later
             metadatas = [{"id": i} for i in range(len(self.all_chunks))]
             self.vector_store = FAISS.from_texts(self.all_chunks, self.embeddings, metadatas=metadatas)
             return f"✅ Indexed {len(self.all_chunks)} chunks."
@@ -344,18 +574,44 @@ class VectorSystem:
         if not self.vector_store: return "⚠️ Please upload a file first.", ""
         if not question: return "⚠️ Enter a question.", ""
 
-        # 1. Retrieve
-        results = self.vector_store.similarity_search_with_score(question, k=3)
+        # 1. RAG RETRIEVAL - Get ONLY the Top 1 Best Match
+        results = self.vector_store.similarity_search_with_score(question, k=1)
+
+        if not results:
+            return "No relevant text found.", ""
+
+        # Get the ID of the best chunk
+        best_doc, score = results[0]
+        chunk_id = best_doc.metadata['id']
+
+        # 2. CONTEXT EXPANSION (Neighboring Chunks)
+        # We retrieve Preceding + Current + Succeeding to repair cut-off sentences.
+
+        # Get Preceding Chunk (if not at start)
+        prev_text = self.all_chunks[chunk_id - 1] if chunk_id > 0 else ""
+
+        # Get Current Chunk
+        curr_text = self.all_chunks[chunk_id]
+
+        # Get Succeeding Chunk (if not at end)
+        next_text = self.all_chunks[chunk_id + 1] if chunk_id < len(self.all_chunks) - 1 else ""
+
+        # Join them into one solid block of text for the LLM
+        context_text = f"{prev_text}\n\n{curr_text}\n\n{next_text}"
+
+        # 3. UI DISPLAY
+        # We format this nicely so the user knows what part is the "Core Match"
+        evidence_display = f"### 🎯 Best Match (Score: {score:.4f})\n"
+
+        if prev_text:
+            evidence_display += f"> **PREVIOUS CONTEXT:**\n...{prev_text[-400:]}\n\n"  # Show last 400 chars
 
-        # Prepare context for LLM
-        context_text = "\n\n".join([doc.page_content for doc, _ in results])
+        evidence_display += f"> **CORE MATCH:**\n**{curr_text}**\n\n"
 
-        # Prepare Evidence Output for UI
-        evidence_display = "### 📚 Retrieved Context:\n"
-        for i, (doc, score) in enumerate(results):
-            evidence_display += f"**Chunk {i+1}** (Score: {score:.4f}):\n> {doc.page_content}\n\n"
+        if next_text:
+            evidence_display += f"> **NEXT CONTEXT:**\n{next_text[:400]}...\n"  # Show first 400 chars
 
-        # 2. Evaluate (if answer provided)
+        # 4. LLM EVALUATION
         llm_feedback = "Please enter a student answer to grade."
         if student_answer:
             llm_feedback = self.llm.evaluate(context_text, question, student_answer)
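The preceding/current/succeeding stitching above generalizes to any window size. A hypothetical helper (expand_window is not in the commit) sketching the same idea, driven by the chunk ids stored in the FAISS metadata:

def expand_window(chunks, center_id, window=1):
    # Clamp the window to the ends of the chunk list, then join the
    # neighborhood into one block for the LLM prompt.
    lo = max(0, center_id - window)
    hi = min(len(chunks), center_id + window + 1)
    return "\n\n".join(chunks[lo:hi])

# Usage with the id stored in the FAISS metadata:
#   context_text = expand_window(self.all_chunks, chunk_id, window=1)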
@@ -368,7 +624,7 @@ system = VectorSystem()
 # --- GRADIO UI ---
 with gr.Blocks(title="EduGenius AI Grader") as demo:
     gr.Markdown("# 🧠 EduGenius: RAG + LLM Grading")
-    gr.Markdown("Powered by **BGE-Large** (Retrieval) and **Llama-3.2-1B** (Evaluation) - All ONNX Optimized.")
+    gr.Markdown("Powered by **BGE-Large** (Retrieval) and **Llama-3.2-1B-Int8** (Evaluation).")
 
     with gr.Row():
         with gr.Column(scale=1):
@@ -382,8 +638,8 @@ with gr.Blocks(title="EduGenius AI Grader") as demo:
             run_btn = gr.Button("Retrieve & Grade", variant="secondary")
 
     with gr.Row():
-        evidence_box = gr.Markdown(label="Context")
-        grade_box = gr.Markdown(label="LLM Evaluation")
+        evidence_box = gr.Markdown(label="Context Used for Grading")
+        grade_box = gr.Markdown(label="LLM Evaluation Result")
 
     upload_btn.click(system.process_file, inputs=[pdf_input], outputs=[status_msg])
     run_btn.click(system.process_query, inputs=[q_input, a_input], outputs=[evidence_box, grade_box])
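A closing note: the new system prompt pins an explicit OUTPUT FORMAT (Score / Verdict / Explanation), but the app displays the raw reply. If structured fields are ever needed downstream, a tolerant parser might look like this (hypothetical parse_grade helper; small models can drift from the format, hence the None fallbacks):

import re

def parse_grade(response):
    # Pull the three labelled fields out of the free-text reply; fall back
    # to None when the model drifts from the requested format.
    score = re.search(r"Score:\s*(\d+)", response)
    verdict = re.search(r"Verdict:\s*(Correct|Incorrect|Partially Correct)", response)
    explanation = re.search(r"Explanation:\s*(.+)", response, re.DOTALL)
    return {
        "score": int(score.group(1)) if score else None,
        "verdict": verdict.group(1) if verdict else None,
        "explanation": explanation.group(1).strip() if explanation else None,
    }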