|
|
import streamlit as st |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
|
|
|
|
|
|
import logging, sys |
|
|
from dotenv import load_dotenv |
|
|
|
|
|
|
|
|
from huggingface_hub import login |
|
|
|
|
|
|
|
|
|
|
|
# Load a local .env file (if present) so development environments can
# supply environment variables; no-op in deployment. The import existed
# but was never invoked in the original script.
load_dotenv()

# Authenticate with the Hugging Face Hub using the token stored in
# Streamlit secrets (.streamlit/secrets.toml or the deployed app's
# secrets manager). Raises KeyError if the secret is missing.
HF_TOKEN = st.secrets["HF_API_TOKEN"]
login(token=HF_TOKEN)

# Send INFO-and-above log records to stdout so they appear in the
# Streamlit server console. basicConfig already attaches a stdout
# StreamHandler to the root logger; the original code then added a
# second StreamHandler, which caused every log line to be printed twice.
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
# Hugging Face model repository used for generation.
model_id = "mistralai/Mistral-7B-v0.1"


@st.cache_resource
def _load_pipeline(repo_id: str):
    """Build the text-generation pipeline once per server process.

    Without st.cache_resource, Streamlit would re-load the ~7B-parameter
    model on every script rerun (i.e. every widget interaction), which is
    prohibitively slow and memory-hungry. The cached resource is shared
    across sessions and reruns.
    """
    return pipeline('text-generation', model=repo_id)


pipe = _load_pipeline(model_id)
|
|
|
|
|
# Simple Q&A form: collect a question, run generation on submit, and
# render the model's answer.
with st.form('my_form'):
    question = st.text_area('Enter your question:', 'Tell me about attention mechanisms in a transformer?')
    submitted = st.form_submit_button('Submit')
    if submitted:
        # max_new_tokens bounds only the generated continuation. The
        # original max_length=100 counted the prompt tokens as well, so a
        # long question left little or no room for the answer.
        result = pipe(question, max_new_tokens=100)
        st.write(question)
        # The pipeline returns [{'generated_text': ...}]; display the
        # text itself rather than dumping the raw list-of-dicts.
        st.write(result[0]['generated_text'])
|
|
|