import streamlit as st
from transformers import AutoTokenizer, AutoModelForQuestionAnswering, pipeline

# ---- Page chrome -----------------------------------------------------------
st.set_page_config(
    page_title="Question Answering App",
    page_icon="❓",
    layout="centered",
    initial_sidebar_state="auto",
)

# Page title with custom style
st.markdown(
    """
Enter a context and question to get precise answers powered by AI.
""",
    unsafe_allow_html=True,
)

# ---- Sidebar: model settings and context input -----------------------------
st.sidebar.header("Model Settings")
model_checkpoint = st.sidebar.text_input(
    "Model Checkpoint", "Diezu/viedumrc", help="Specify the model checkpoint to use."
)

# Kept for backward compatibility with any later code that references it;
# the active pipeline now follows the user-entered ``model_checkpoint``.
model_checkpoint1 = 'Diezu/viedumrc'


@st.cache_resource(show_spinner="Loading model…")
def _load_qa_pipeline(checkpoint: str):
    """Build and cache a question-answering pipeline for *checkpoint*.

    ``st.cache_resource`` ensures the model/tokenizer are loaded once per
    checkpoint instead of on every Streamlit rerun.
    """
    tokenizer = AutoTokenizer.from_pretrained(checkpoint)
    model = AutoModelForQuestionAnswering.from_pretrained(checkpoint)
    return pipeline("question-answering", model=model, tokenizer=tokenizer)


# BUG FIX: the original ignored the user-entered ``model_checkpoint`` and
# always loaded the hard-coded checkpoint, reloading it on every rerun.
# Load the requested checkpoint once, with the error handling the
# commented-out draft intended.
try:
    question_answerer = _load_qa_pipeline(model_checkpoint)
except Exception as e:
    st.error(f"Failed to load model or tokenizer: {e}", icon="🚨")
    st.stop()

# Report the checkpoint actually in use (was hard-coded and could go stale).
st.sidebar.markdown(
    f""" Using model:{model_checkpoint}.
""",
    unsafe_allow_html=True,
)

context_sidebar = st.sidebar.text_area(
    "Context",
    "",
    help="Enter the context that contains information for answering questions.",
    height=200,
    placeholder="Provide context for your question...",
)
# Main application
st.markdown(
"""