# llama3 / app.py
# Hugging Face Space "llama3" by Nikhitha2310
# (last change: "Update app.py", commit 64f6ff3, verified)
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# Load the model and tokenizer from Hugging Face
@st.cache_resource
def load_model(model_name: str = "meta-llama/Meta-Llama-3.1-8B-Instruct"):
    """Load and cache a causal-LM model and its tokenizer from the Hugging Face Hub.

    Args:
        model_name: Hub repo id of the checkpoint to load. Defaults to the
            Llama 3.1 8B Instruct model (gated repo — presumably requires an
            HF access token in the environment; confirm in deployment).

    Returns:
        tuple: ``(model, tokenizer)`` ready to be wrapped in a
        ``text-generation`` pipeline.
    """
    # st.cache_resource loads the (large) model once per server process and
    # shares it across user sessions and Streamlit script reruns.
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    model = AutoModelForCausalLM.from_pretrained(model_name)
    return model, tokenizer
# Heavy objects come from the st.cache_resource-backed loader above.
model, tokenizer = load_model()
# Wrap model + tokenizer in a text-generation pipeline. NOTE(review): this
# line re-runs on every Streamlit rerun; only the underlying model/tokenizer
# are cached, the pipeline wrapper itself is rebuilt each time.
generator = pipeline("text-generation", model=model, tokenizer=tokenizer)
# --- Streamlit UI -----------------------------------------------------------
st.title("Question Answering with Hugging Face Model")

# Free-form question from the user.
question = st.text_input("Enter your question:")

if st.button("Generate Answer"):
    if question:
        # "Let's think step by step" nudges the model toward step-wise reasoning.
        prompt = f"Question: {question}\nAnswer: Let's think step by step."
        # max_new_tokens bounds only the continuation; the previous
        # max_length=100 included the prompt tokens, so a long question could
        # silently leave no budget for the answer. return_full_text=False
        # stops the pipeline from echoing the prompt back in the output.
        result = generator(
            prompt,
            max_new_tokens=100,
            do_sample=True,
            top_k=10,
            return_full_text=False,
        )
        st.text_area("Generated Answer:", value=result[0]['generated_text'], height=200)
    else:
        st.warning("Please enter a question.")