"""Streamlit helpdesk UI for a fine-tuned Phi-1.5 banking Q&A model.

Presents a single text box; on "Ask", wraps the question in the
Alpaca-style prompt the model was fine-tuned on, generates a completion,
and shows only the text after the "### Response:" marker.
"""

import streamlit as st
from transformers import pipeline

# Hugging Face Hub repo ID of the merged fine-tuned model.
MODEL_ID = "prd101-wd/phi1_5-bankingqa-merged"


@st.cache_resource
def load_model():
    """Build the generation pipeline once per Streamlit server process.

    BUG FIX: the original requested a "question-answering" pipeline, but
    every downstream call treats the result as a causal text-generation
    pipeline (free-form prompt, ``max_new_tokens``, ``do_sample``,
    ``[0]["generated_text"]``).  An extractive QA pipeline requires
    ``question=``/``context=`` arguments and returns an ``"answer"`` key,
    so each "Ask" click would have raised at runtime.  The correct task
    for this fine-tuned causal LM is "text-generation".
    """
    return pipeline("text-generation", model=MODEL_ID)


# Cached across reruns by @st.cache_resource, so the model loads once.
pipe = load_model()

# --- Streamlit UI ---
st.title("Banking HelpDesk from Finetuned Phi1-5")
st.markdown("Ask a question and the fine-tuned Phi-1.5 model will answer.")

user_input = st.text_area("Your question:", height=100)

if st.button("Ask"):
    if user_input.strip():
        # Alpaca-style prompt format used during fine-tuning.
        prompt = f"### Instruction:\n{user_input}\n\n### Response:\n"
        output = pipe(prompt, max_new_tokens=200, do_sample=True)[0]["generated_text"]
        # The pipeline echoes the prompt; keep only the text after the
        # last "### Response:" marker.
        answer = output.split("### Response:")[-1].strip()
        st.markdown("### HelpdeskBot Answer:")
        st.success(answer)
    else:
        st.warning("Please enter a question.")