# Author: John Best
# Status: First Draft (commit 37a267f, 699 bytes)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM
# Load the fine-tuned model
# NOTE(review): "your_fine_tuned_model_directory" is a placeholder — replace
# with the real local directory or Hub repo ID of the fine-tuned model, or
# from_pretrained() will fail at startup. TODO confirm intended model path.
model_name = "your_fine_tuned_model_directory"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)
def generate_response(prompt):
    """Generate a model reply for *prompt* using the fine-tuned causal LM.

    Args:
        prompt: User question text to feed to the model.

    Returns:
        The decoded model output as a string, with special tokens
        (e.g. EOS/padding markers) stripped so they don't leak into the UI.
    """
    inputs = tokenizer.encode(prompt, return_tensors="pt")
    # max_length bounds prompt + generated tokens; a single greedy sequence.
    outputs = model.generate(inputs, max_length=150, num_return_sequences=1)
    # skip_special_tokens=True: the original decode left <eos>/<pad> tokens
    # in the visible response.
    response = tokenizer.decode(outputs[0], skip_special_tokens=True)
    return response
# Streamlit UI: one text input and a button that triggers generation.
st.title("Fine-Tuned Personal Finance Assistant")
prompt = st.text_input("Ask a question:")
response = ""
if st.button("Generate"):
    # Guard: don't run the model on an empty/whitespace-only prompt —
    # the original generated unconditionally on every button click.
    if prompt.strip():
        response = generate_response(prompt)
    else:
        st.warning("Please enter a question first.")
st.write(response)