Spaces:
Sleeping
Sleeping
File size: 1,625 Bytes
1d8ed3b |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 |
from app.backend.runpod_client import format_messages_as_prompt, run_prompt, clean_and_parse_json
from app.backend.messages_templates import toc_prompt, chapter_prompt, chapter_prompt_edgecase
import streamlit as st
def extract_chapters_from_toc(toc_text: str) -> None:
    """Parse a table-of-contents text into a chapters dict and store it in session state.

    Builds a Gemma3-optimized prompt from *toc_text*, runs it through the
    RunPod endpoint, and stores the parsed JSON result in
    ``st.session_state['chapters_dict']``.

    Args:
        toc_text: Raw table-of-contents text extracted from the document.

    Returns:
        None. The result is written to Streamlit session state as a side effect.
    """
    # toc_prompt already formats the messages for Gemma3, so no extra
    # format_messages_as_prompt() pass is needed here.
    prompt = toc_prompt(toc_text)
    raw_output = run_prompt(prompt)
    st.session_state['chapters_dict'] = clean_and_parse_json(raw_output)
def generate_questions_from_chapter(chunks, num_questions, max_questions=5):
    """Generate quiz questions for a chapter from its retrieved text chunks.

    Builds a Gemma3-optimized chapter prompt, runs it through the RunPod
    endpoint, and parses the model output as JSON.

    Args:
        chunks: Context chunks for the chapter, passed to ``chapter_prompt``.
        num_questions: Number of questions requested from the model.
        max_questions: Upper bound on questions to request (default 5).

    Returns:
        The parsed questions structure on success, or ``None`` if the model
        output could not be parsed as JSON.
    """
    # chapter_prompt already formats the messages for Gemma3, so no extra
    # format_messages_as_prompt() pass is needed here.
    prompt = chapter_prompt(contexts=chunks, num_questions=num_questions, max_questions=max_questions)
    raw_output = run_prompt(prompt)
    # Keep the try body minimal: only the parse can raise here. A bare
    # except would also swallow KeyboardInterrupt/SystemExit, so catch
    # Exception and report the actual error instead of hiding it.
    try:
        generated_questions = clean_and_parse_json(raw_output)
    except Exception as exc:
        print(f"Error parsing JSON: {exc}")
        return None
    st.success("Questions generated successfully!")
    return generated_questions
def generate_questions_from_chapter_edgecase(chunks, num_questions, max_questions=5):
    """Generate quiz questions for a chapter using the edge-case prompt variant.

    Same flow as ``generate_questions_from_chapter`` but uses
    ``chapter_prompt_edgecase`` (which takes pre-grouped chunks) to build
    the Gemma3-optimized prompt.

    Args:
        chunks: Grouped context chunks, passed as ``grouped_chunks``.
        num_questions: Number of questions requested from the model.
        max_questions: Upper bound on questions to request (default 5).

    Returns:
        The parsed questions structure on success, or ``None`` if the model
        output could not be parsed as JSON.
    """
    # chapter_prompt_edgecase already formats the messages for Gemma3, so no
    # extra format_messages_as_prompt() pass is needed here.
    prompt = chapter_prompt_edgecase(grouped_chunks=chunks, num_questions=num_questions, max_questions=max_questions)
    raw_output = run_prompt(prompt)
    # Keep the try body minimal: only the parse can raise here. A bare
    # except would also swallow KeyboardInterrupt/SystemExit, so catch
    # Exception and report the actual error instead of hiding it.
    try:
        generated_questions = clean_and_parse_json(raw_output)
    except Exception as exc:
        print(f"Error parsing JSON: {exc}")
        return None
    st.success("Questions generated successfully!")
    return generated_questions
|