Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -10,28 +10,27 @@ MODEL_NAME = "Alaaeldin/pubmedBERT-demo"
|
|
| 10 |
|
| 11 |
@st.cache_resource
|
| 12 |
def load_pipeline():
|
| 13 |
-
|
|
|
|
| 14 |
|
| 15 |
# Load the pipeline
|
| 16 |
qa_pipeline = load_pipeline()
|
| 17 |
|
| 18 |
# Streamlit app UI
|
| 19 |
st.title("PubMed BERT Q&A App")
|
| 20 |
-
st.write("Ask questions based on
|
| 21 |
|
| 22 |
-
# User
|
| 23 |
-
context = st.text_area("Enter the biomedical context (e.g., PubMed abstract):", height=200)
|
| 24 |
question = st.text_input("Enter your question:")
|
| 25 |
|
| 26 |
# Button to get the answer
|
| 27 |
if st.button("Get Answer"):
|
| 28 |
-
if
|
| 29 |
-
with st.spinner("
|
| 30 |
-
result = qa_pipeline(question=
|
| 31 |
-
st.success(f"Answer: {result['
|
| 32 |
-
st.write(f"Confidence: {result['score']:.2f}")
|
| 33 |
else:
|
| 34 |
-
st.warning("Please
|
| 35 |
|
| 36 |
# Footer
|
| 37 |
st.markdown("---")
|
|
|
|
@st.cache_resource
def load_pipeline():
    """Build and cache the text-generation pipeline for MODEL_NAME.

    Decorated with ``st.cache_resource`` so the model is loaded once per
    server process instead of on every Streamlit rerun.

    Returns:
        A Hugging Face ``text-generation`` pipeline. NOTE(review): despite
        the app's Q&A framing, this is a generative pipeline, not a
        "question-answering" (extractive) pipeline.
    """
    # `token=` replaces the deprecated `use_auth_token=` argument
    # (deprecated in transformers v4.x, removed in v5).
    return pipeline(
        "text-generation",
        model=MODEL_NAME,
        tokenizer=MODEL_NAME,
        token=access_token,
    )
# Load the (cached) pipeline once at startup.
qa_pipeline = load_pipeline()

# Streamlit app UI
st.title("PubMed BERT Q&A App")
st.write("Ask questions directly based on the model's training!")

# User input for the question
question = st.text_input("Enter your question:")

# Button to get the answer
if st.button("Get Answer"):
    if question.strip():
        with st.spinner("Generating the answer..."):
            result = qa_pipeline(question, max_length=100, num_return_sequences=1)
        # text-generation pipelines prepend the prompt to `generated_text`;
        # strip it so the "Answer" does not simply echo the question back.
        # Fall back to the full text if the model produced nothing new.
        generated = result[0]["generated_text"]
        answer = generated[len(question):].strip() or generated
        st.success(f"Answer: {answer}")
    else:
        # Empty / whitespace-only input: prompt the user instead of
        # sending a blank prompt to the model.
        st.warning("Please enter a question.")

# Footer
st.markdown("---")
|