Spaces:
Sleeping
Sleeping
import streamlit as st
from transformers import pipeline

# Choose a Flan-T5 model. "google/flan-t5-large" is a decent balance for CPU.
# If it's slow, try "google/flan-t5-base". For higher quality, "google/flan-t5-xl"
# or "google/flan-ul2" (if resources allow).
MODEL_NAME = "google/flan-t5-large"


@st.cache_resource(show_spinner="Loading Flan-T5 model...")
def load_pipeline():
    """Build and cache the text2text-generation pipeline.

    Streamlit re-executes this whole script on every user interaction
    (every button click / text change). Without ``st.cache_resource`` the
    multi-gigabyte model would be re-downloaded/reloaded on each rerun;
    with it, the pipeline is constructed once per process and reused.

    Returns:
        transformers.Pipeline: a ``text2text-generation`` pipeline with
        sampling enabled for some creativity in the answers.
    """
    return pipeline(
        "text2text-generation",
        model=MODEL_NAME,
        do_sample=True,   # sample instead of greedy decode
        top_p=0.9,        # nucleus sampling
        temperature=0.7,  # mild creativity
    )


# Module-level handle kept so the rest of the script can call `pipe(...)`
# exactly as before.
pipe = load_pipeline()
# "System prompt" prepended to every request: it steers the model toward
# structured, detailed financial guidance with an educational disclaimer.
system_prompt = """You are a helpful AI assistant specialized in finance.
You provide thorough, step-by-step, structured guidance using bullet points or headings if relevant.
Offer disclaimers that this is not official financial advice, but well-researched educational content.
Ensure your tone is clear, professional, and detailed."""

# --- Streamlit page layout -------------------------------------------------
st.title("Flan-T5 Financial Advisor")
st.write("Ask a financial question and receive a single, detailed response.")

# Free-form question box; the value is consumed by the Generate handler below.
user_input = st.text_area("Enter your financial question here:")
if st.button("Generate"):
    if user_input.strip():
        # 1) Build a single-turn prompt: system instructions + user question.
        prompt = f"{system_prompt}\n\nUser: {user_input}\nAssistant:"

        # 2) Generate a reply. `max_new_tokens` explicitly bounds only the
        #    generated text; the older `max_length` parameter is deprecated
        #    in favor of it and is ambiguous about whether prompt tokens
        #    count toward the limit. A spinner gives feedback during the
        #    slow CPU-bound generation.
        with st.spinner("Generating response..."):
            output = pipe(prompt, max_new_tokens=300)
        answer = output[0]["generated_text"].strip()

        # 3) Display the model's answer.
        st.write("### Assistant's Response")
        st.write(answer)
    else:
        # Previously an empty/whitespace-only input failed silently with no
        # output at all; tell the user what to do instead.
        st.warning("Please enter a question before clicking Generate.")