|
|
import os |
|
|
import gradio as gr |
|
|
from langchain.prompts import ChatPromptTemplate |
|
|
from langchain_community.chat_models import ChatOpenAI |
|
|
from langchain.schema import StrOutputParser |
|
|
|
|
|
def create_the_question_prompt_template(num_questions, questions_type, difficulty_level, context):
    """Build the ChatPromptTemplate used by the question-generator chain.

    Args:
        num_questions: How many questions to request from the model.
        questions_type: One of "multiple-choice", "true-false", "open-ended".
        difficulty_level: Requested difficulty (e.g. "Easy", "Medium", "Hard").
        context: The user-supplied concept/contents the questions are about.

    Returns:
        A ChatPromptTemplate containing the fully rendered prompt (all
        parameters are interpolated here via the f-string, so the template
        has no remaining input variables).
    """
    template = f"""Create {num_questions} {questions_type} questions keeping difficulty level as {difficulty_level} about the following concept/contents: {context}.


The format of the question could be one of the following:


- Multiple-choice:


Questions:


<Question1>:<A. Answer 1>,<B. Answer 2>,<C. Answer 3>,<D. Answer 4>


<Question2>:<A. Answer 1>,<B. Answer 2>,<C. Answer 3>,<D. Answer 4>


...


Answers:


<Answer1>:<A|B|C|D>


<Answer2>:<A|B|C|D>


...


Example:


-Questions:


-1.What is the time complexity of a binary search tree?


A.O(n)


B.O(log n)


C.O(n^2)


D.O(1)


-Answers:


1.B


- True-false:


Questions:


<Question1>: <True|False>


<Question2>: <True|False>


...


Answers:


<Answer1>: <True|False>


<Answer2>: <True|False>


...


Example:


- Questions:


- 1. Binary search trees are implemented using linked lists.


- 2. The time complexity of a binary search tree is O(n).


- Answers:


- 1. False


- 2. True


- Open-ended:


Questions:


<Question1>:


<Question2>:


...


Answers:


<Answer1>:


<Answer2>:


Example:


Questions:


- 1. What is a binary search tree?


- 2. Binary search trees are implemented using linked lists.


- Answers:


1. A binary search tree is a data structure that is used to store data in a sorted manner.


2. Binary search trees are implemented using linked lists.


"""
    # BUG FIX: the f-string has already interpolated every value, but
    # from_template() still treats any remaining "{...}" as a template
    # variable. If the user's `context` (or any parameter) contains braces,
    # the chain would fail or demand unexpected inputs at invoke() time.
    # Doubling the braces makes the rendered text literal.
    escaped = template.replace("{", "{{").replace("}", "}}")
    return ChatPromptTemplate.from_template(escaped)
|
|
|
|
|
def create_question_chain(prompt_template, llm):
    """Assemble the LCEL pipeline: prompt -> chat model -> plain-string output.

    Args:
        prompt_template: A ChatPromptTemplate produced by
            create_the_question_prompt_template().
        llm: The chat model instance to run the prompt through.

    Returns:
        A runnable chain whose invoke() result is a plain string.
    """
    parser = StrOutputParser()
    pipeline = prompt_template | llm
    return pipeline | parser
|
|
|
|
|
def split_questions_answers(question_response):
    """Split a raw model response into formatted question and answer strings.

    Args:
        question_response: The full text returned by the LLM chain. The
            answers portion is expected to follow an "Answers:" marker.

    Returns:
        A tuple (formatted_questions, formatted_answers). The answers string
        is empty when the response contains no "Answers:" marker.
    """
    # str.partition never raises: when the marker is absent it returns the
    # whole string as the "before" part and "" for the remainder. This
    # replaces the old double split("Answers:"), whose `except IndexError`
    # was unreachable, and — unlike split() without maxsplit — it keeps all
    # text after the FIRST marker even if "Answers:" appears again later.
    questions_section, _, answers_section = question_response.partition("Answers:")

    formatted_questions = format_questions(questions_section.strip())
    formatted_answers = format_answers(answers_section.strip())
    return formatted_questions, formatted_answers
|
|
def format_questions(questions):
    """Format a questions section for display.

    Each numbered question starts a new entry; multiple-choice options
    (A.-D.) and any other continuation lines are indented beneath the
    question they belong to.

    Args:
        questions: Raw questions text (one item per line).

    Returns:
        A single string with questions separated by blank lines and
        options/continuations indented.
    """

    def _starts_new_question(line):
        # True for a "<digits>." prefix, e.g. "1." or "12.". This
        # generalizes the old hard-coded ("1.", ..., "5.") whitelist,
        # which silently mis-filed question 6 and beyond as continuation
        # text of the previous question.
        head, sep, _ = line.partition(".")
        return bool(sep) and head.isdigit()

    formatted = []
    current_question = ""

    for line in questions.split("\n"):
        line = line.strip()
        if _starts_new_question(line):
            # Flush the previous question before starting the next one.
            if current_question:
                formatted.append(current_question.strip())
            current_question = f"\n{line}"
        elif line.startswith(("A.", "B.", "C.", "D.")):
            current_question += f"\n   {line}"
        elif line:
            # Headers ("Questions:") and free-form continuations.
            current_question += f"\n   {line}"

    if current_question:
        formatted.append(current_question.strip())
    return "\n".join(formatted)
|
|
|
|
|
def format_answers(answers):
    """Normalize an answers section for display.

    Drops blank lines and strips surrounding whitespace from every
    remaining line.

    Args:
        answers: Raw answers text (one answer per line).

    Returns:
        The cleaned answers joined with newlines.
    """
    cleaned = [entry.strip() for entry in answers.split("\n") if entry.strip()]
    return "\n".join(cleaned)
|
|
|
|
|
def generate_questions(context, num_questions, questions_type, difficulty_level):
    """Generate quiz questions and answers for the given context.

    Gradio callback: builds the prompt, runs the LLM chain, and splits the
    response into question/answer strings for the two output textboxes.

    Args:
        context: Concept/contents to generate questions about.
        num_questions: Number of questions requested.
        questions_type: "multiple-choice", "true-false", or "open-ended".
        difficulty_level: "Easy", "Medium", or "Hard".

    Returns:
        (questions, answers) strings; on failure, an "Error: ..." message
        in the first slot and "" in the second.
    """
    # SECURITY FIX: a live OpenAI API key was previously hard-coded here.
    # Never commit secrets to source — the key must be supplied via the
    # OPENAI_API_KEY environment variable (and the leaked key revoked).
    if not os.environ.get("OPENAI_API_KEY"):
        return "Error: OPENAI_API_KEY environment variable is not set.", ""

    llm = ChatOpenAI(temperature=0.0)
    prompt_template = create_the_question_prompt_template(num_questions, questions_type, difficulty_level, context)
    chain = create_question_chain(prompt_template, llm)

    try:
        # NOTE: the prompt is fully rendered at template-creation time, so
        # these keys are currently unused by the template; they are passed
        # for forward-compatibility with a placeholder-based template.
        question_response = chain.invoke({
            "questions_type": questions_type,
            "num_questions": num_questions,
            "difficulty_level": difficulty_level,
            "context": context,
        })

        print("Question Response:", question_response)

        questions, answers = split_questions_answers(question_response)
        return questions, answers
    except Exception as e:
        # Surface the failure in the UI instead of crashing the Gradio app.
        return f"Error: {str(e)}", ""
|
|
|
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI: collect the quiz parameters, run generate_questions on click,
# and show the questions and answers side by side.
# ---------------------------------------------------------------------------
with gr.Blocks() as demo:
    gr.Markdown("# Quiz App - Generate Questions")

    # Input widgets.
    context_input = gr.Textbox(placeholder="Enter the concept for the questions", label="Context/Concept")
    num_questions_input = gr.Slider(minimum=1, maximum=5, step=1, value=2, label="Number of Questions")
    question_type_input = gr.Radio(choices=["multiple-choice", "true-false", "open-ended"], value="multiple-choice", label="Quiz Type")
    dropdown = gr.Dropdown(choices=["Easy", "Medium", "Hard"], value="Medium", label="Select difficulty level ")

    generate_btn = gr.Button("Generate Questions")

    # Output widgets, side by side.
    with gr.Row():
        questions_output = gr.Textbox(lines=5, label="Generated Questions")
        answers_output = gr.Textbox(lines=5, label="Generated Answers")

    # Wire the button to the generator callback.
    generate_btn.click(
        generate_questions,
        inputs=[context_input, num_questions_input, question_type_input, dropdown],
        outputs=[questions_output, answers_output],
    )

demo.launch()