Manith Marapperuma committed on
Commit 0ed3d5c · verified · 1 Parent(s): 830fd5e

Update app.py

Files changed (1):
  app.py +31 -18
app.py CHANGED
@@ -1,30 +1,43 @@
  import streamlit as st
- from transformers import pipeline
+ from transformers import AutoTokenizer, AutoModelForCausalLM

- # Load the Mistral-7B model using pipeline
- pipe = pipeline("text-generation", model="mistralai/Mistral-7B-Instruct-v0.2")
+ # Load the model and tokenizer
+ tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+ model = AutoModelForCausalLM.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
+
+ # Initialize session state to store chat history
+ if "chat_history" not in st.session_state:
+     st.session_state["chat_history"] = []

  def generate_response(prompt):
-     """Generates a response using the Mistral-7B model."""
-     response = pipe(prompt, max_length=1000, num_return_sequences=1)[0]["generated_text"]
-     return response.strip()
+     """Generates a response from the model based on the prompt."""
+     input_ids = tokenizer.encode(prompt + tokenizer.eos_token, return_tensors="pt")
+     beam_output = model.generate(
+         input_ids,
+         max_length=50,
+         num_beams=5,
+         no_repeat_ngram_size=2,
+         early_stopping=True
+     )
+     return tokenizer.decode(beam_output[0], skip_special_tokens=True)
+
+ def display_chat_history():
+     """Displays the chat history in the Streamlit app."""
+     for message in st.session_state["chat_history"]:
+         st.write(message)

  # Streamlit app layout
  st.title("Mistral Chatbot")
- user_input = st.text_input("Ask me anything!")
+ display_chat_history()
+
+ user_input = st.text_input("You:")

  if user_input:
+     # Add user input to chat history
+     st.session_state["chat_history"].append(f"You: {user_input}")
+
+     # Generate response from the model
      response = generate_response(user_input)
+     st.session_state["chat_history"].append(f"Mistral: {response}")
      st.write(f"Mistral: {response}")

- # Deployment to Hugging Face Spaces (instructions included)
- # 1. Create a Hugging Face account (if you don't have one)
- # 2. Create a new Space from your account
- # 3. Push your code to a Git repository (e.g., GitHub)
- # 4. In your Space settings, connect your Git repository
- # 5. Under "Model", select the Mistral-7B model you're using
- # 6. Under "Environment", create a new environment with Python 3.7+
- # 7. Under "Requirements", add "streamlit transformers" (separate lines)
- # 8. Under "Start script", enter "streamlit run app.py" (replace app.py with your filename)
- # 9. Deploy your Space!
-
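
A few notes on the new version. First, the loading step: from_pretrained with no extra arguments loads Mistral-7B in float32 (roughly 28 GB of weights), and Streamlit re-executes the whole script on every interaction, so as written the model is reloaded on each rerun. Below is a minimal sketch of a cached, half-precision load; st.cache_resource needs a recent Streamlit (1.18+), device_map="auto" needs the accelerate package installed, and the dtype and placement choices are assumptions on my part, not part of this commit.

import streamlit as st
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM

@st.cache_resource  # load once per server process instead of on every rerun
def load_model():
    tokenizer = AutoTokenizer.from_pretrained("mistralai/Mistral-7B-Instruct-v0.2")
    model = AutoModelForCausalLM.from_pretrained(
        "mistralai/Mistral-7B-Instruct-v0.2",
        torch_dtype=torch.float16,  # ~14 GB in fp16 vs ~28 GB in fp32
        device_map="auto",          # requires accelerate; places weights on GPU when present
    )
    return tokenizer, model

tokenizer, model = load_model()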
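Second, the prompt format: generate_response feeds the raw user text (plus eos_token) straight into the model, but Mistral-7B-Instruct-v0.2 is trained on prompts wrapped in [INST] ... [/INST] tags, so raw text tends to produce weaker completions. One way to build a conforming prompt is the tokenizer's built-in chat template, available in recent transformers releases; build_prompt here is a hypothetical helper, not something in the commit.

def build_prompt(user_message):
    """Hypothetical helper: wrap a user message in the model's instruction format."""
    messages = [{"role": "user", "content": user_message}]
    # tokenize=False returns the formatted string, e.g. "<s>[INST] ... [/INST]"
    return tokenizer.apply_chat_template(messages, tokenize=False)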
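Third, the generation call: max_length=50 counts the prompt tokens as well, so anything but the shortest question leaves almost no room for a reply, and tokenizer.decode(beam_output[0], ...) returns the prompt followed by the completion, so the chat echoes the user's words back. A variant sketch under those observations, budgeting new tokens separately and slicing the prompt off before decoding (the 256-token budget is an arbitrary assumption):

def generate_response(prompt):
    """Variant sketch: bound the reply length and return only the new tokens."""
    input_ids = tokenizer.encode(prompt, return_tensors="pt").to(model.device)
    output = model.generate(
        input_ids,
        max_new_tokens=256,       # budget for the reply, independent of prompt length
        num_beams=5,
        no_repeat_ngram_size=2,
        early_stopping=True,
    )
    # Slice off the prompt so only the model's reply is decoded
    return tokenizer.decode(output[0][input_ids.shape[-1]:], skip_special_tokens=True)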
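Finally, the history handling: Streamlit reruns the script from the top on each interaction and st.text_input keeps its value between runs, so the if user_input: block can run again while the box still holds text, re-appending the same exchange to chat_history and regenerating the response. One common fix is the chat widgets (Streamlit 1.24+), where st.chat_input returns the message only on the run right after it was submitted; a sketch under that assumption:

user_input = st.chat_input("Ask me anything!")  # None except right after a submit

if user_input:
    st.session_state["chat_history"].append(f"You: {user_input}")
    response = generate_response(user_input)
    st.session_state["chat_history"].append(f"Mistral: {response}")

display_chat_history()  # render the full history exactly once per run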