# Bloomsphere-app / helper2.py
# Source: Hugging Face upload by jayantp2003 ("Upload 8 files", commit 078d100, verified)
import google.generativeai as genai
from PIL import Image
from dotenv import load_dotenv
import os

# Pull credentials/config from a local .env file into the process environment.
load_dotenv()

# SECURITY FIX: the Gemini API key was hard-coded in source (and therefore
# leaked to anyone with repo access). Read it from the environment instead;
# set GEMINI_API_KEY in .env. The old key should be revoked.
genai.configure(api_key=os.getenv("GEMINI_API_KEY"))
def extract_questions_from_image(image_path):
    """Extract every question visible in an image via Gemini OCR/understanding.

    Args:
        image_path: Path to an image file (PNG/JPG/JPEG).

    Returns:
        str: The model's text — questions formatted as "Qn: ..." lines,
        or the literal string "None" when the image contains no questions.
    """
    model = genai.GenerativeModel("gemini-1.5-flash")
    prompt = (
        "Analyze the provided image and extract all the questions present in it. "
        "Return only the extracted questions in a structured format, as follows:\n\n"
        "**Output Format:**\n"
        "Q1: [First question]\n"
        "Q2: [Second question]\n"
        "Q3: [Third question]\n"
        "... \n\n"
        "If there are no questions in the image, return 'None' without any additional text."
    )
    # FIX: open the image in a context manager so the underlying file handle
    # is closed promptly (the original leaked it).
    with Image.open(image_path) as image:
        response = model.generate_content([image, prompt])
    return response.text.strip()
from PyPDF2 import PdfReader
import google.generativeai as genai


def extract_questions_from_pdf(pdf_path):
    """Extract every question found in a PDF's text via Gemini.

    Args:
        pdf_path: Path to the PDF file to analyze.

    Returns:
        str: The model's text — questions formatted as "Qn: ..." lines,
        or the literal string "None" when the text contains no questions.
    """
    # BUG FIX: the original opened the hard-coded file "sst.pdf" and silently
    # ignored the pdf_path argument — the caller's file was never read.
    pdf_reader = PdfReader(pdf_path)
    # Extract each page's text once (the original called extract_text() twice
    # per page), keeping only non-empty pages.
    page_texts = (page.extract_text() for page in pdf_reader.pages)
    text = "\n".join(t for t in page_texts if t)
    model = genai.GenerativeModel("gemini-1.5-flash")
    prompt = (
        "Analyze the provided text and extract all the questions present in it. "
        "Return only the extracted questions in a structured format, as follows:\n\n"
        "**Output Format:**\n"
        "Q1: [First question]\n"
        "Q2: [Second question]\n"
        "Q3: [Third question]\n"
        "... \n\n"
        "If there are no questions in the text, return 'None' without any additional text."
    )
    response = model.generate_content([text, prompt])
    return response.text.strip()
# Document to process — point this at your own file.
file_path = "data/sst.pdf"  # Change this to your file path

# Route the file to the matching extractor based on its extension.
suffix = file_path.lower()
if suffix.endswith(".pdf"):
    result = extract_questions_from_pdf(file_path)
elif suffix.endswith((".png", ".jpg", ".jpeg")):
    result = extract_questions_from_image(file_path)
else:
    result = "Unsupported file format. Please use PNG, JPG, or PDF."

# One extracted question (or message line) per list entry.
qw = result.split("\n")
from openai import OpenAI

# OpenAI-compatible client used for Bloom's-taxonomy classification;
# the endpoint and key are supplied via environment variables.
_generator_key = os.getenv('API_KEY')
_generator_url = os.getenv('GENERATOR_BASE_URL')
client = OpenAI(api_key=_generator_key, base_url=_generator_url)
# Classification prompt template. NOTE: this is a runtime string sent to the
# model — its exact text matters. The doubled braces {{ }} escape literal
# braces for str.format(); only {input_sentence} is substituted per question.
prompt = """
Analyze the following sentence and classify it according to Bloom's Taxonomy levels.
Return the results as a probability distribution where the sum of all 6 levels equals 1.
Bloom's Taxonomy Levels:
1. Remembering: Recall facts and basic concepts
2. Understanding: Explain ideas or concepts
3. Applying: Use information in new situations
4. Analyzing: Draw connections among ideas
5. Evaluating: Justify a stand or decision
6. Creating: Produce new or original work
For the given sentence, provide your assessment in JSON format with the following structure:
{{
"remembering": float,
"understanding": float,
"applying": float,
"analyzing": float,
"evaluating": float,
"creating": float
}}
Ensure that:
- Each value is between 0 and 1
- The sum of all six values equals exactly 1
- The distribution reflects the cognitive level required by the sentence
Sentence to analyze: "{input_sentence}"
Return only the JSON output without any additional explanation or commentary.
"""
import json

# Classify each extracted question against Bloom's taxonomy and print the
# per-level probability distribution returned by the model.
for sentence in qw:
    # FIX: result.split("\n") yields blank lines; the original sent each of
    # them to the API as a "sentence". Skip empty/whitespace-only entries.
    if not sentence.strip():
        continue
    formatted_prompt = prompt.format(input_sentence=sentence)
    response = client.chat.completions.create(
        model=os.getenv("MODEL_NAME"),
        messages=[{"role": "user", "content": formatted_prompt}],
    )
    # Extract the content from the response.
    response_content = response.choices[0].message.content
    # Models often wrap JSON in markdown fences (```json ... ```); strip them.
    json_str = response_content.strip().replace('```json', '').replace('```', '').strip()
    try:
        bloom_scores = json.loads(json_str)
        print("\n")
        print("sentence:", sentence)
        print("Bloom Score:")
        print(json.dumps(bloom_scores, indent=4))  # Pretty-print the distribution
    except json.JSONDecodeError as e:
        # Surface the raw model output so malformed responses can be debugged.
        print(f"Error parsing JSON: {e}")
        print("Raw response:", response_content)