Manith Marapperuma committed on
Commit
333c23e
·
verified ·
1 Parent(s): cd464bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +24 -17
app.py CHANGED
@@ -1,23 +1,30 @@
 
1
  import streamlit as st
2
  from transformers import pipeline
3
 
4
- # Initialize the pipeline with the Mistral model
5
- generator = pipeline(model="bigscience/Mistral", model_kwargs={"use_auth_token": True})
6
 
7
- # Streamlit app setup
8
- st.title('Mistral LLM Explorer with Transformers')
9
- st.write('Enter your prompt below to interact with the Mistral model.')
 
 
10
 
11
- # User input
12
- user_input = st.text_area("Prompt:")
 
13
 
14
- # When the user presses the 'Submit' button
15
- if st.button('Submit'):
16
- if user_input:
17
- with st.spinner('Generating response...'):
18
- # Generate response from the Mistral model
19
- results = generator(user_input, max_length=100, clean_up_tokenization_spaces=True)
20
- # Display the generated text
21
- st.text_area("Response:", value=results[0]['generated_text'], height=300)
22
- else:
23
- st.warning('Please enter a prompt.')
 
 
 
 
1
+ import os
2
  import streamlit as st
3
  from transformers import pipeline
4
 
5
+ # Get the Hugging Face API token from the environment variable
6
+ HF_API_TOKEN = os.getenv("HF_API_TOKEN")
7
 
8
+ if not HF_API_TOKEN:
9
+ st.error("Hugging Face API token not found. Please set the HF_API_TOKEN environment variable.")
10
+ else:
11
+ # Initialize the pipeline with the Mistral model and your API token
12
+ generator = pipeline(model="bigscience/Mistral", model_kwargs={"use_auth_token": HF_API_TOKEN})
13
 
14
+ # Streamlit app setup
15
+ st.title('Mistral LLM Explorer with Transformers')
16
+ st.write('Enter your prompt below to interact with the Mistral model.')
17
 
18
+ # User input
19
+ user_input = st.text_area("Prompt:")
20
+
21
+ # When the user presses the 'Submit' button
22
+ if st.button('Submit'):
23
+ if user_input:
24
+ with st.spinner('Generating response...'):
25
+ # Generate response from the Mistral model
26
+ results = generator(user_input, max_length=100, clean_up_tokenization_spaces=True)
27
+ # Display the generated text
28
+ st.text_area("Response:", value=results[0]['generated_text'], height=300)
29
+ else:
30
+ st.warning('Please enter a prompt.')