Update app.py
Browse files
app.py
CHANGED
|
@@ -4,9 +4,9 @@ from langchain.prompts import ChatPromptTemplate
|
|
| 4 |
from langchain_community.chat_models import ChatOpenAI
|
| 5 |
from langchain.schema import StrOutputParser
|
| 6 |
|
| 7 |
-
def create_the_question_prompt_template(num_questions, questions_type, context):
|
| 8 |
"""Create the prompt template for the questions generator app."""
|
| 9 |
-
template = f"""Create {num_questions} {questions_type} questions about the following concept/contents: {context}.
|
| 10 |
The format of the question could be one of the following:
|
| 11 |
- Multiple-choice:
|
| 12 |
Questions:
|
|
@@ -103,7 +103,7 @@ def format_questions(questions):
|
|
| 103 |
if current_question:
|
| 104 |
formatted.append(current_question.strip())
|
| 105 |
return("\n".join(formatted))
|
| 106 |
-
|
| 107 |
def format_answers(answers):
|
| 108 |
"""Format answers to display with consistent alignment."""
|
| 109 |
lines = answers.split('\n')
|
|
@@ -115,16 +115,16 @@ def format_answers(answers):
|
|
| 115 |
|
| 116 |
return "\n".join(formatted)
|
| 117 |
|
| 118 |
-
def generate_questions(context, num_questions, questions_type):
|
| 119 |
"""Function to generate questions."""
|
| 120 |
os.environ["OPENAI_API_KEY"] = "sk-REDACTED"  # SECURITY: a live API key was committed on this line — revoke it and load the key from the environment (os.getenv) or a secrets manager instead
|
| 121 |
|
| 122 |
llm = ChatOpenAI(temperature=0.0)
|
| 123 |
-
prompt_template = create_the_question_prompt_template(num_questions, questions_type, context)
|
| 124 |
chain = create_question_chain(prompt_template, llm)
|
| 125 |
|
| 126 |
try:
|
| 127 |
-
question_response = chain.invoke({"questions_type": questions_type, "num_questions": num_questions, "context": context})
|
| 128 |
|
| 129 |
# Log the entire response for debugging
|
| 130 |
print("Question Response:", question_response)
|
|
@@ -141,6 +141,11 @@ with gr.Blocks() as demo:
|
|
| 141 |
context_input = gr.Textbox(label="Context/Concept", placeholder="Enter the concept for the questions")
|
| 142 |
num_questions_input = gr.Slider(label="Number of Questions", minimum=1, maximum=5, value=2, step=1)
|
| 143 |
question_type_input = gr.Radio(label="Quiz Type", choices=["multiple-choice", "true-false", "open-ended"], value="multiple-choice")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 144 |
generate_btn = gr.Button("Generate Questions")
|
| 145 |
with gr.Row():
|
| 146 |
questions_output = gr.Textbox(label="Generated Questions", lines=5)
|
|
@@ -148,11 +153,9 @@ with gr.Blocks() as demo:
|
|
| 148 |
|
| 149 |
generate_btn.click(
|
| 150 |
generate_questions,
|
| 151 |
-
inputs=[context_input, num_questions_input, question_type_input],
|
| 152 |
outputs=[questions_output, answers_output],
|
| 153 |
)
|
| 154 |
|
| 155 |
# Launch the app
|
| 156 |
-
demo.launch()
|
| 157 |
-
|
| 158 |
-
|
|
|
|
| 4 |
from langchain_community.chat_models import ChatOpenAI
|
| 5 |
from langchain.schema import StrOutputParser
|
| 6 |
|
| 7 |
+
def create_the_question_prompt_template(num_questions, questions_type, difficulty_level, context):
|
| 8 |
"""Create the prompt template for the questions generator app."""
|
| 9 |
+
template = f"""Create {num_questions} {questions_type} questions keeping difficulty level as {difficulty_level} about the following concept/contents: {context}.
|
| 10 |
The format of the question could be one of the following:
|
| 11 |
- Multiple-choice:
|
| 12 |
Questions:
|
|
|
|
| 103 |
if current_question:
|
| 104 |
formatted.append(current_question.strip())
|
| 105 |
return("\n".join(formatted))
|
| 106 |
+
|
| 107 |
def format_answers(answers):
|
| 108 |
"""Format answers to display with consistent alignment."""
|
| 109 |
lines = answers.split('\n')
|
|
|
|
| 115 |
|
| 116 |
return "\n".join(formatted)
|
| 117 |
|
| 118 |
+
def generate_questions(context, num_questions, questions_type,difficulty_level):
|
| 119 |
"""Function to generate questions."""
|
| 120 |
os.environ["OPENAI_API_KEY"] = "sk-REDACTED"  # SECURITY: a live API key was committed on this line — revoke it and load the key from the environment (os.getenv) or a secrets manager instead
|
| 121 |
|
| 122 |
llm = ChatOpenAI(temperature=0.0)
|
| 123 |
+
prompt_template = create_the_question_prompt_template(num_questions, questions_type, difficulty_level, context)
|
| 124 |
chain = create_question_chain(prompt_template, llm)
|
| 125 |
|
| 126 |
try:
|
| 127 |
+
question_response = chain.invoke({"questions_type": questions_type, "num_questions": num_questions,"difficulty_level": difficulty_level, "context": context})
|
| 128 |
|
| 129 |
# Log the entire response for debugging
|
| 130 |
print("Question Response:", question_response)
|
|
|
|
| 141 |
context_input = gr.Textbox(label="Context/Concept", placeholder="Enter the concept for the questions")
|
| 142 |
num_questions_input = gr.Slider(label="Number of Questions", minimum=1, maximum=5, value=2, step=1)
|
| 143 |
question_type_input = gr.Radio(label="Quiz Type", choices=["multiple-choice", "true-false", "open-ended"], value="multiple-choice")
|
| 144 |
+
dropdown = gr.Dropdown(
|
| 145 |
+
choices=["Easy", "Medium", "Hard"], # Options in the selection box
|
| 146 |
+
label="Select difficulty level ",
|
| 147 |
+
value="Medium" # Default value
|
| 148 |
+
)
|
| 149 |
generate_btn = gr.Button("Generate Questions")
|
| 150 |
with gr.Row():
|
| 151 |
questions_output = gr.Textbox(label="Generated Questions", lines=5)
|
|
|
|
| 153 |
|
| 154 |
generate_btn.click(
|
| 155 |
generate_questions,
|
| 156 |
+
inputs=[context_input, num_questions_input, question_type_input,dropdown],
|
| 157 |
outputs=[questions_output, answers_output],
|
| 158 |
)
|
| 159 |
|
| 160 |
# Launch the app
|
| 161 |
+
demo.launch()
|
|
|
|
|
|