Update app.py
app.py CHANGED
@@ -0,0 +1,124 @@
import streamlit as st
from sentence_transformers import SentenceTransformer
from transformers import pipeline, AutoTokenizer, AutoModelForCausalLM
import time
import sys
import os
import json

# Local imports
sys.path.append(os.path.abspath(os.path.join(os.path.dirname(__file__), "..")))
from utils.chunking import smart_chunk_text
from utils.retriever import HybridRetriever
from utils.generator import generate_answer
from utils.evaluation import evaluate_response
from utils.guardrails import validate_input, validate_output


# ---------------------------
# Streamlit Page Config
# ---------------------------
st.set_page_config(page_title="Allstate Financial QA")
st.title("Allstate Financial QA System")


# ---------------------------
# Cached Loaders
# ---------------------------
@st.cache_resource
def load_retriever():
    # Read every processed filing and build the hybrid retriever once.
    texts = []
    for file in os.listdir("data/processed"):
        if file.endswith(".txt") or file.endswith(".json"):
            with open(os.path.join("data/processed", file), "r") as f:
                texts.append(f.read())
    chunks = smart_chunk_text(texts, chunk_size=100)
    embedder = SentenceTransformer("all-MiniLM-L6-v2")
    return HybridRetriever(chunks, embedder)


@st.cache_resource
def load_finetuned_model():
    tokenizer = AutoTokenizer.from_pretrained("jayyd/financial-qa-model")
    model = AutoModelForCausalLM.from_pretrained("jayyd/financial-qa-model")
    pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
    return pipe


# ---------------------------
# UI Inputs
# ---------------------------
query = st.text_input("Ask a financial question")
method = st.radio("Choose method:", ["RAG", "Fine-Tuned"])


# ---------------------------
# Main App Logic
# ---------------------------
if query:
    # Validate input
    is_valid, message = validate_input(query)
    if not is_valid:
        st.error(message)
        st.stop()

    start_time = time.time()

    if method == "RAG":
        with st.spinner("Retrieving and generating answer..."):
            retriever = load_retriever()
            chunks = retriever.retrieve(query)
            # Extractive only
            answer, supporting_context = generate_answer(query, chunks)

            # Evaluate
            metrics = evaluate_response(query, answer, chunks)
            confidence = metrics.get("confidence", 0.0)
            is_valid, message = validate_output(answer, confidence)

            # Show answer
            st.subheader("Answer")
            st.write(answer)

            if not is_valid:
                st.warning(message)

            # Supporting context (expandable)
            with st.expander("Supporting Context"):
                st.write(supporting_context)

            # Sidebar metrics
            response_time = time.time() - start_time
            st.sidebar.markdown("### Response Metrics")
            st.sidebar.markdown(f"Response Time: {response_time:.2f}s")
            st.sidebar.markdown(f"Confidence Score: {confidence:.2f}")
            st.sidebar.markdown(f"Number of Retrieved Chunks: {metrics.get('num_chunks', 0)}")
            st.sidebar.markdown(f"Chunk Relevance Score: {metrics.get('chunk_relevance', 0):.2f}")

    else:
        with st.spinner("Generating answer from fine-tuned model..."):
            pipe = load_finetuned_model()
            prompt = f"Q: {query}\nA:"
            raw_output = pipe(prompt, max_new_tokens=100)[0]["generated_text"]

            # Clean the output: remove prompt part
            output = raw_output.split("A:")[-1].strip()

            # Evaluate
            metrics = evaluate_response(query, output)
            confidence = metrics.get("confidence", 0.0)
            is_valid, message = validate_output(output, confidence)

            # Show answer
            st.subheader("Answer")
            st.write(output)

            if not is_valid:
                st.warning(message)

            # Sidebar metrics
            response_time = time.time() - start_time
            st.sidebar.markdown("### Response Metrics")
            st.sidebar.markdown(f"Response Time: {response_time:.2f}s")
            st.sidebar.markdown(f"Confidence Score: {confidence:.2f}")
            st.sidebar.markdown(f"Answer Length: {metrics.get('answer_length', 0)} words")
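
The `utils` package imported above is not part of this change, so its interfaces are only visible through their call sites. A minimal sketch of the retrieval side, assuming `smart_chunk_text` splits documents into fixed-size word windows and `HybridRetriever` blends dense embedding similarity with a simple lexical-overlap score (both assumptions; the repo's actual chunking and scoring may differ):

# Hypothetical sketch of utils/chunking.py and utils/retriever.py,
# inferred only from how app.py calls them.
from typing import List

import numpy as np
from sentence_transformers import SentenceTransformer


def smart_chunk_text(texts: List[str], chunk_size: int = 100) -> List[str]:
    # Assumption: chunk_size counts words; split each document into
    # fixed-size word windows.
    chunks = []
    for text in texts:
        words = text.split()
        for i in range(0, len(words), chunk_size):
            chunks.append(" ".join(words[i:i + chunk_size]))
    return chunks


class HybridRetriever:
    def __init__(self, chunks: List[str], embedder: SentenceTransformer):
        self.chunks = chunks
        self.embedder = embedder
        # Pre-compute normalized chunk embeddings once at load time.
        self.chunk_embs = embedder.encode(chunks, normalize_embeddings=True)

    def retrieve(self, query: str, top_k: int = 5) -> List[str]:
        # Dense score: cosine similarity (dot product of unit vectors).
        q_emb = self.embedder.encode([query], normalize_embeddings=True)[0]
        dense = self.chunk_embs @ q_emb
        # Lexical score: fraction of query terms present in each chunk.
        q_terms = set(query.lower().split())
        lexical = np.array([
            len(q_terms & set(chunk.lower().split())) / max(len(q_terms), 1)
            for chunk in self.chunks
        ])
        # Illustrative blend; the real weighting is unknown.
        scores = 0.7 * dense + 0.3 * lexical
        top = np.argsort(scores)[::-1][:top_k]
        return [self.chunks[i] for i in top]

Whatever the real scoring is, `retrieve` must return a list of text chunks, since `generate_answer(query, chunks)` and `evaluate_response(query, answer, chunks)` consume them directly.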
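
The guardrail and evaluation helpers are likewise unseen here. From the call sites, `validate_input` and `validate_output` must each return an `(is_valid, message)` pair, and `evaluate_response` must return a metrics dict with a `confidence` key while accepting retrieved chunks optionally (the fine-tuned branch calls it without them). A sketch consistent with those signatures, using purely hypothetical thresholds and a lexical-overlap confidence proxy:

# Hypothetical sketch of utils/guardrails.py and utils/evaluation.py,
# matching only the signatures app.py relies on.
from typing import Dict, List, Optional, Tuple


def validate_input(query: str) -> Tuple[bool, str]:
    # Assumption: basic sanity checks; the repo's real rules are not shown.
    if not query.strip():
        return False, "Please enter a question."
    if len(query) > 500:
        return False, "Question is too long; please shorten it."
    return True, ""


def validate_output(answer: str, confidence: float) -> Tuple[bool, str]:
    # Assumption: flag empty or low-confidence answers (0.3 is illustrative).
    if not answer.strip():
        return False, "No answer could be generated."
    if confidence < 0.3:
        return False, "Low-confidence answer; verify against the source filings."
    return True, ""


def evaluate_response(query: str, answer: str,
                      chunks: Optional[List[str]] = None) -> Dict:
    # Assumption: lexical overlap as a cheap confidence proxy. `chunks` is
    # optional because the fine-tuned branch calls this without context.
    q_terms = set(query.lower().split())
    a_terms = set(answer.lower().split())
    metrics = {
        "confidence": len(q_terms & a_terms) / max(len(q_terms), 1),
        "answer_length": len(answer.split()),
    }
    if chunks is not None:
        relevance = [
            len(a_terms & set(c.lower().split())) / max(len(a_terms), 1)
            for c in chunks
        ]
        metrics["num_chunks"] = len(chunks)
        metrics["chunk_relevance"] = sum(relevance) / max(len(relevance), 1)
    return metrics

Note that `load_retriever` reads from `data/processed` at startup, so the app assumes that directory exists and contains processed `.txt`/`.json` files; to run locally, use `streamlit run app.py`.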
|