File size: 1,605 Bytes
bc1efdb
 
 
8edc97c
 
fc885e1
 
8edc97c
fc885e1
 
3becac1
 
 
 
 
 
8edc97c
 
 
ba7ddcb
8edc97c
ba7ddcb
8edc97c
 
 
bc1efdb
8edc97c
bc1efdb
8edc97c
bc1efdb
8edc97c
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
import streamlit as st
from transformers import pipeline

# Choose a Flan-T5 model. "google/flan-t5-large" is a decent balance for CPU.
# If it's slow, try "google/flan-t5-base". For higher quality, "google/flan-t5-xl" or "google/flan-ul2" (if resources allow).
model_name = "google/flan-t5-large"


@st.cache_resource
def load_pipeline(name: str = model_name):
    """Build the text2text-generation pipeline once and cache it.

    Without st.cache_resource, Streamlit re-executes this script on every
    widget interaction and would reload the multi-GB model each time.
    Sampling parameters (top_p / temperature) add some creativity.
    """
    return pipeline(
        "text2text-generation",
        model=name,
        do_sample=True,
        top_p=0.9,
        temperature=0.7,
    )


pipe = load_pipeline()

# A "system prompt" that instructs the model to provide structured, elaborate financial guidance.
system_prompt = """You are a helpful AI assistant specialized in finance.
You provide thorough, step-by-step, structured guidance using bullet points or headings if relevant.
Offer disclaimers that this is not official financial advice, but well-researched educational content.
Ensure your tone is clear, professional, and detailed."""

# Streamlit UI
st.title("Flan-T5 Financial Advisor")
st.write("Ask a financial question and receive a single, detailed response.")

user_input = st.text_area("Enter your financial question here:")

if st.button("Generate"):
    if user_input.strip():
        # 1) Build a single-turn prompt with system instructions + user question
        prompt = f"{system_prompt}\n\nUser: {user_input}\nAssistant:"

        # 2) Generate a reply. Use max_new_tokens (not the deprecated
        # max_length) so the cap applies to the generated continuation only.
        output = pipe(prompt, max_new_tokens=300)
        answer = output[0]["generated_text"].strip()

        # 3) Display the model's answer
        st.write("### Assistant's Response")
        st.write(answer)
    else:
        # Give explicit feedback instead of silently doing nothing.
        st.warning("Please enter a question before generating a response.")