Commit 01b8683
Parent(s): 3f2e00e

changed streamlit app
app.py
CHANGED
@@ -1,51 +1,25 @@
 import streamlit as st
-
-from transformers import AutoTokenizer, AutoModelForCausalLM
-from transformers import pipeline
-import os
-import logging, sys
-from dotenv import load_dotenv
-
-from huggingface_hub import login
-#load_dotenv()
-
-#HF_TOKEN = os.environ.get("HF_API_TOKEN")
-HF_TOKEN = st.secrets["HF_API_TOKEN"]
-login(token=HF_TOKEN)
-
-# Setup logging
-logging.basicConfig(stream=sys.stdout, level=logging.INFO)
-logging.getLogger().addHandler(logging.StreamHandler(stream=sys.stdout))
-
-
+from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
 
 # Load the tokenizer and model
 tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
 model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
 
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-question =st.text_input("enter your question","tell me about transformer.")
-
-# Generate text using the pipeline
-result = pipe(question, max_length=50, truncation=True)
-print(result)
+# Create text generation pipeline
+pipe = pipeline("text-generation", model=model, tokenizer=tokenizer)
+
+# Continuously prompt the user for questions
+while True:
+    # User input and chat interaction
+    prompt = st.text_input("Please ask your question here (or type 'exit' to quit):")
+    if prompt.lower() == 'exit':
+        break
+
+    # Display user message in chat message container
+    st.markdown("<b style='color:Red;'>User:</b> {}".format(prompt), unsafe_allow_html=True)
+
+    # Generate response
+    result = pipe(prompt, max_length=50, truncation=True)
+
+    # Display bot response in chat message container
+    st.markdown("<b style='color:Green;'>Bot:</b> {}".format(result[0]['generated_text']), unsafe_allow_html=True)
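A note on the added loop: Streamlit reruns the whole script from top to bottom on every widget interaction, so a `while True:` wrapped around `st.text_input` does not behave like a console read loop (repeating the same widget in one run will typically raise a duplicate-widget error, and reloading a 7B model on every rerun is expensive). Below is a minimal sketch of the same chat flow written against Streamlit's rerun model instead; the `load_pipe` helper and the `history` session key are illustrative names, not part of this commit:

import streamlit as st
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline

# Load the model once per process instead of on every rerun
@st.cache_resource
def load_pipe():
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-v0.1")
    model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-v0.1")
    return pipeline("text-generation", model=model, tokenizer=tokenizer)

pipe = load_pipe()

# Conversation history survives reruns in session state
if "history" not in st.session_state:
    st.session_state.history = []

prompt = st.text_input("Please ask your question here:")

# Generate only for a new prompt, so reruns don't duplicate entries
if prompt and (not st.session_state.history or st.session_state.history[-1][0] != prompt):
    result = pipe(prompt, max_length=50, truncation=True)
    st.session_state.history.append((prompt, result[0]["generated_text"]))

# Re-render the whole conversation on each rerun
for user_msg, bot_msg in st.session_state.history:
    st.markdown("<b style='color:Red;'>User:</b> {}".format(user_msg), unsafe_allow_html=True)
    st.markdown("<b style='color:Green;'>Bot:</b> {}".format(bot_msg), unsafe_allow_html=True)

With `@st.cache_resource`, the tokenizer, model, and pipeline are constructed once and reused across reruns, which is the usual pattern for heavyweight objects in a Streamlit app.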