| from langchain.prompts import PromptTemplate | |
| from transformers import pipeline | |
| from langchain_core.documents import Document | |
def generate_summary(full_text, max_input_chars=4000, max_length=150, min_length=30):
    """Summarize *full_text* with the default Hugging Face summarization model.

    Args:
        full_text: Raw document text to summarize.
        max_input_chars: Hard character cap applied before the model call.
            Character-based, so only approximate w.r.t. the model's true
            token limit — TODO confirm against the model's tokenizer.
        max_length: Maximum length of the generated summary (model tokens).
        min_length: Minimum length of the generated summary (model tokens).

    Returns:
        The generated summary string.
    """
    # NOTE(review): a fresh pipeline is constructed on every call, which
    # re-loads model weights each time; cache it at module level if this
    # function is called repeatedly.
    summarizer = pipeline("summarization")
    # Truncate to guard against overlong inputs; do_sample=False keeps the
    # output deterministic.
    result = summarizer(
        full_text[:max_input_chars],
        max_length=max_length,
        min_length=min_length,
        do_sample=False,
    )
    return result[0]['summary_text']
def generate_challenge_questions(qa_chain):
    """Ask the QA chain for three comprehension questions about the document.

    Args:
        qa_chain: Callable accepting ``{"question": ..., "chat_history": [...]}``
            and returning a mapping with an ``"answer"`` string.

    Returns:
        Up to three non-empty question strings.
    """
    prompt = "Generate 3 logic/comprehension-based questions from the uploaded document."
    result = qa_chain({ "question": prompt, "chat_history": [] })
    output = result["answer"]
    # Filter blank lines BEFORE slicing: the model's answer often opens with
    # a blank line or separator, and slicing first (as the old code did)
    # silently returned fewer than three questions.
    lines = [line.strip() for line in output.split("\n")]
    return [line for line in lines if line][:3]
def evaluate_responses(qa_chain, questions, answers):
    """Grade the user's answers by delegating judgment to the QA chain.

    Args:
        qa_chain: Callable accepting ``{"question": ..., "chat_history": [...]}``
            and returning a mapping with an ``"answer"`` string.
        questions: Question strings, paired positionally with *answers*.
        answers: User answer strings (extras beyond the shorter list are ignored).

    Returns:
        The chain's free-text evaluation.
    """
    qa_pairs = (f"Q: {q}\nA: {a}" for q, a in zip(questions, answers))
    combined = "\n".join(qa_pairs)
    prompt = f"Evaluate the user's answers below. For each, say if it's correct or not and justify:\n\n{combined}"
    response = qa_chain({ "question": prompt, "chat_history": [] })
    return response["answer"]
def extract_highlight_snippets(source_docs: list[Document]):
    """Return exactly three snippet strings taken from the first three docs.

    Args:
        source_docs: Retrieved documents; only ``page_content`` is read.

    Returns:
        A list of exactly three strings. Missing slots are padded with ``""``
        so callers can always unpack three references.
    """
    snippets = []
    for doc in source_docs[:3]:
        text = doc.page_content.strip()
        # Append the ellipsis only when the snippet was actually truncated;
        # the old code added "..." unconditionally, mislabeling short docs.
        snippets.append((text[:300] + "...") if len(text) > 300 else text)
    return snippets + [""] * (3 - len(snippets))