Manith Marapperuma committed
Commit 0edd432 · verified · 1 Parent(s): 1c64285

Update app.py

Files changed (1): app.py +23 -15
app.py CHANGED
@@ -1,20 +1,28 @@
+# Import necessary libraries
 import streamlit as st
-from transformers import pipeline
+from transformers import AutoModelForCausalLM, AutoTokenizer
+import torch
 
-# Initialize the pipeline with your chosen model
-pipe = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.2")
+# Load the model and tokenizer
+model_name = "mistralai/Mistral-7B-v0.1"
+tokenizer = AutoTokenizer.from_pretrained(model_name)
+model = AutoModelForCausalLM.from_pretrained(model_name, device_map="auto")
 
-# Streamlit app layout
-st.title("Mistral Chatbot")
+# Streamlit app
+def main():
+    st.title("Mistral Chatbot")
+
+    user_input = st.text_input("You: ", "Hello, chatbot!")
 
-# User input
-user_input = st.text_input("You:", "")
+    if st.button("Send"):
+        with st.spinner("Thinking..."):
+            # Tokenize the user input and generate a response
+            model_inputs = tokenizer(user_input, return_tensors="pt")
+            model_inputs = {k: v.to("cuda") for k, v in model_inputs.items()}
+            generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
+            chatbot_response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
+
+            st.text_area("Chatbot:", value=chatbot_response, height=200, max_chars=None, key=None)
 
-if st.button("Send"):
-    if user_input:
-        # Generate a response
-        response = pipe(user_input, max_length=50, clean_up_tokenization_spaces=True)
-        # Display the response
-        st.text_area("Chatbot:", value=response[0]['generated_text'], height=100, max_chars=None, key=None)
-    else:
-        st.warning("Please enter some text to chat.")
+if __name__ == "__main__":
+    main()
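
Review note: the updated code loads the model with device_map="auto" (which may place weights on CPU or GPU) but then moves the tokenized inputs to a hard-coded "cuda", which would raise an error on CPU-only hosts; it also switches from the instruction-tuned Mistral-7B-Instruct-v0.2 to the base Mistral-7B-v0.1, which is not tuned for chat. A minimal device-agnostic sketch of the generation step, assuming the same tokenizer and model objects as above, might read:

    # Move inputs to wherever the model was actually placed, rather than assuming a GPU
    model_inputs = tokenizer(user_input, return_tensors="pt").to(model.device)
    generated_ids = model.generate(**model_inputs, max_new_tokens=100, do_sample=True)
    chatbot_response = tokenizer.decode(generated_ids[0], skip_special_tokens=True)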