|
|
import streamlit as st |
|
|
from datasets import load_dataset |
|
|
|
|
|
|
|
|
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline, AutoModelForQuestionAnswering |
|
|
import torch |
|
|
# Filesystem path to the fine-tuned BERT extractive-QA checkpoint
# (exported from a Kaggle notebook run, hence the directory name).
model_path = "./kaggle-3/working/bert_qa"


# Load tokenizer and model once at import time; both are reused by ask().
# NOTE(review): Streamlit re-executes this whole script on every user
# interaction, so these loads repeat on each rerun — consider wrapping them
# in @st.cache_resource. TODO confirm against the deployed Streamlit version.
tokenizer_new = AutoTokenizer.from_pretrained(model_path)


model_new = AutoModelForQuestionAnswering.from_pretrained(model_path)
|
|
|
|
|
def ask(question: str, context: str) -> str:
    """Answer *question* by extracting a span from *context*.

    The pair is tokenized into a fixed 384-token window (only the context
    side is truncated, never the question), the fine-tuned model predicts
    start/end logits, and the highest-scoring token span is decoded.

    Args:
        question: Natural-language question (English).
        context: Passage expected to contain the answer.

    Returns:
        The decoded answer span. May be empty (end index before start) or
        include special tokens such as "[CLS]" when the model predicts an
        invalid span — NOTE(review): consider skip_special_tokens=True and
        span validation; left unchanged here to preserve output behavior.
    """
    inputs = tokenizer_new(
        question,
        context,
        max_length=384,
        truncation="only_second",  # truncate context only, keep the question intact
        padding="max_length",
        return_tensors="pt",
    )

    # Inference only — no gradient bookkeeping needed.
    with torch.no_grad():
        outputs = model_new(**inputs)

    # Most likely start/end token positions over the whole input sequence.
    answer_start_index = outputs.start_logits.argmax()
    answer_end_index = outputs.end_logits.argmax()

    predict_answer_tokens = inputs.input_ids[0, answer_start_index : answer_end_index + 1]
    # Fix: the original had a second, unreachable return statement after this
    # one (dead code); removed.
    return tokenizer_new.decode(predict_answer_tokens)
|
|
|
|
|
|
|
|
|
|
|
# --- Streamlit page chrome -------------------------------------------------
# set_page_config must be the first Streamlit call in the script.
st.set_page_config(page_title="Bible Q&A Bot", page_icon="📖", layout="centered")


st.title("📖 Bible Q&A Bot")

st.markdown("## Ask any question about the Bible and get scripturally grounded answers.")

# The model is English-only (see the isascii() gate in print_answer).
st.write("‼️Only english🇺🇸 language provided‼️")
|
|
|
|
|
|
|
|
|
|
|
# --- Question input and session controls -----------------------------------

# The user's question; empty string until something is typed.
query = st.text_input("Enter your question:")


clear_button = st.button("Clear")

if clear_button:
    # Fix: iterate over a snapshot of the keys. Deleting entries while
    # iterating the live .keys() view raises
    # "dictionary changed size during iteration".
    for key in list(st.session_state.keys()):
        del st.session_state[key]


st.markdown('### Choose option to provide context')

# Either paste a passage by hand or pick a specific verse from the dataset.
option = st.radio("Choose how to provide context:", ("Manually", "Select Bible Verse"), label_visibility="collapsed")
|
|
|
|
|
def print_answer(question, context, answer):
    """Render the question, context and answer, or an error for non-English input.

    Both the question and the context must be pure ASCII (the model is
    English-only); otherwise an error banner is shown and nothing is rendered.
    """
    # Guard clause: reject anything containing non-ASCII characters.
    if not (question.isascii() and context.isascii()):
        st.error("Please ensure both the question and context are in English.")
        return

    # Render the three sections as heading + body pairs.
    sections = (
        ("### ❓Question❓", question),
        ("### 📖Context📖", context),
        ("### ✅Answer✅", answer),
    )
    for heading, body in sections:
        st.markdown(heading)
        st.write(body)
|
|
|
|
|
|
|
|
import pandas as pd |
|
|
|
|
|
|
|
|
|
|
|
# Verse table fetched from the Hugging Face Hub; rows carry 'book',
# 'chapter', 'verse' and 'text' columns (used by the selectors below).
bible = pd.read_json("hf://datasets/nbeerbower/bible-dpo/bible-dpo.json")


# Distinct book names, in dataset order, for the book selectbox.
books = bible['book'].unique().tolist()

# Lookup table: (book, chapter) -> number of verses in that chapter.
v_by_b_c = bible.groupby(['book', 'chapter']).size().to_dict()
|
|
|
|
|
# --- Option 1: user pastes the context themselves --------------------------
if option == "Manually":

    # Free-form passage the model extracts the answer from.
    # NOTE(review): the label says "(optional)" but ask() is always called
    # with it, so an empty context yields a meaningless answer — confirm intent.
    context = st.text_area("Enter the context (optional):")

    submit_button = st.button("Get Answer")


    # Run inference only when the button was pressed and a question exists.
    if submit_button and query:

        with st.spinner("Searching Scripture..."):

            answer = ask(query, context)


        print_answer(query, context, answer)
|
|
|
|
|
|
|
|
# --- Option 2: pick a specific verse as the context ------------------------
elif option == "Select Bible Verse":


    # Cascading selectors: book -> chapter -> verse. Each level offers only
    # the empty choice "" until its parent has been selected.
    book_name = st.selectbox("Select the book name:", [""] + books)


    if book_name:

        # Number of distinct chapters in the chosen book.
        max_chapter = len(bible[bible["book"] == book_name].groupby('chapter'))

        chapter = st.selectbox("Select the chapter:", [""] + list(range(1, max_chapter + 1)))

    else:

        # No book yet: placeholder-only chapter selector.
        chapter = st.selectbox("Select the chapter:", [""])


    if chapter:

        # Verse count for (book, chapter); falls back to 0 (empty range)
        # if the pair is somehow absent from the lookup table.
        max_verse = v_by_b_c.get((book_name, int(chapter)), 0)

        verse = st.selectbox("Enter the verse:", [""] + list(range(1, max_verse + 1)))

    else:

        # No chapter yet: placeholder-only verse selector.
        verse = st.selectbox("Enter the verse:", [""])


    fetch_context_button = st.button("Fetch Verse")


    # Run only when every selector is filled, a question was typed, and the
    # user pressed the button on this rerun.
    if query and fetch_context_button and book_name and chapter and verse:

        # Exact verse text, used verbatim as the QA context.
        context = bible[

            (bible["book"] == book_name) &

            (bible['chapter'] == chapter) &

            (bible['verse'] == verse)

        ]['text'].values[0]

        with st.spinner("Searching Scripture..."):

            answer = ask(query, context)


        print_answer(query, context, answer)
|
|
|
|
|
|
|
|
|
|
|
|