# app.py — namantjeaswi (commit 6d6488b)
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import logging, sys
from dotenv import load_dotenv
from huggingface_hub import login

# Authenticate with the Hugging Face Hub using the API token stored in
# Streamlit secrets (needed for gated models such as meta-llama/*).
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)

# Log to stdout so messages show up in the Streamlit/host console.
# NOTE: basicConfig already attaches a stdout StreamHandler to the root
# logger; the extra addHandler() call the original had on top of it made
# every log record print twice, so it was removed.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

# Model to serve (previously meta-llama/Meta-Llama-3-8B).
model_id = "mistralai/Mistral-7B-v0.1"

# Build the text-generation pipeline directly from the model id; the
# pipeline resolves and loads the tokenizer and model itself, so no
# explicit AutoTokenizer/AutoModelForCausalLM calls are required here.
pipe = pipeline('text-generation', model=model_id)
# Simple Q&A form: the user enters a question and, on submit, the
# text-generation pipeline produces a completion that is echoed back.
# (The original source had the with/if bodies unindented — a SyntaxError;
# the intended nesting is restored here.)
with st.form('my_form'):
    question = st.text_area('Enter your question:', 'Tell me about attention mechanisms in a transformer?')
    submitted = st.form_submit_button('Submit')
    if submitted:
        # max_length caps prompt + generated tokens at 100, so a long
        # prompt leaves little room for the answer; max_new_tokens would
        # cap only the generated portion — TODO consider switching.
        result = pipe(question, max_length=100)
        st.write(question)
        # result is a list of dicts: [{'generated_text': ...}]
        st.write(result)