# NOTE(review): the three lines below were non-Python extraction residue
# (a git-blame / file-listing header) that broke the file's syntax; removed.
"""Streamlit app: submit a question and answer it with a Hugging Face
text-generation model (Mistral-7B by default)."""
import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import logging, sys
from dotenv import load_dotenv
from huggingface_hub import login

# Authenticate with the Hugging Face Hub. The token lives in Streamlit
# secrets (required e.g. for gated models such as Meta-Llama).
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)

# Setup logging to stdout so messages show in the Streamlit server console.
# basicConfig already attaches a stdout StreamHandler; the extra
# addHandler(StreamHandler(stdout)) the script had before duplicated every
# log line, so it is dropped.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)

#model_id = "meta-llama/Meta-Llama-3-8B"
model_id = "mistralai/Mistral-7B-v0.1"


@st.cache_resource
def _load_pipeline(model_name: str):
    """Build the text-generation pipeline once and reuse it across reruns.

    Streamlit re-executes this script on every widget interaction; without
    caching, the 7B-parameter model would be re-downloaded/re-loaded on
    every form submit.
    """
    return pipeline('text-generation', model=model_name)


pipe = _load_pipeline(model_id)

with st.form('my_form'):
    question = st.text_area('Enter your question:', 'Tell me about attention mechanisms in a transformer?')
    submitted = st.form_submit_button('Submit')
    if submitted:
        # max_new_tokens bounds only the generated tokens; the previous
        # max_length=100 counted the prompt too, so a long question could
        # leave no room for the answer.
        result = pipe(question, max_new_tokens=100)
        st.write(question)
        st.write(result)
# NOTE(review): stray "|" extraction artifact removed.