# -*- coding: utf-8 -*-
"""Untitled124.ipynb

Automatically generated by Colab.

Original file is located at
    https://colab.research.google.com/drive/1eRfHwY6L9mCxrk1jljgVTuofsuoZP3AN
"""

# Commented out IPython magic to ensure Python compatibility.
# %%writefile app.py
# import streamlit as st
# from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
#
# # Set up the title of the app
# st.title("🤖 Chatbot with Mistral-7B-Instruct-v0.3")
#
# # Load the Mistral-7B-Instruct-v0.3 model
# @st.cache_resource  # Cache the model so it is loaded once, not on every rerun
# def load_model():
#     model_name = "mistralai/Mistral-7B-Instruct-v0.3"
#     tokenizer = AutoTokenizer.from_pretrained(model_name)
#     model = AutoModelForCausalLM.from_pretrained(
#         model_name, device_map="auto", torch_dtype="auto"
#     )
#     return pipeline("text-generation", model=model, tokenizer=tokenizer)
#
# generator = load_model()
#
# # Initialize session state to store chat history
# if "messages" not in st.session_state:
#     st.session_state.messages = []
#
# # Display chat history
# for message in st.session_state.messages:
#     with st.chat_message(message["role"]):
#         st.markdown(message["content"])
#
# # Input from user
# if prompt := st.chat_input("What is up?"):
#     # Add user message to chat history
#     st.session_state.messages.append({"role": "user", "content": prompt})
#     with st.chat_message("user"):
#         st.markdown(prompt)
#
#     # Generate a response using the Mistral model
#     with st.chat_message("assistant"):
#         # Format the prompt with Mistral's instruction tags
#         formatted_prompt = f"[INST] {prompt} [/INST]"
#         # max_new_tokens bounds only the generated tokens; max_length would
#         # count the prompt as well and could truncate the reply
#         response = generator(
#             formatted_prompt, max_new_tokens=100, num_return_sequences=1
#         )[0]["generated_text"]
#         # The pipeline returns prompt + completion; keep only the completion
#         assistant_response = response.split("[/INST]")[-1].strip()
#         st.markdown(assistant_response)
#
#     # Add assistant response to chat history
#     st.session_state.messages.append({"role": "assistant", "content": assistant_response})
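
# To try the app from a Colab session, a common pattern (an assumption, not
# part of the original notebook) is to launch Streamlit in the background and
# expose its default port 8501 through a tunnel such as localtunnel:
#
# !streamlit run app.py --server.port 8501 &>/content/streamlit_logs.txt &
# !npx localtunnel --port 8501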
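
# Note that the app above sends only the latest user prompt to the model, so
# earlier turns stored in st.session_state.messages are never seen by Mistral.
# A minimal sketch of multi-turn prompting using the tokenizer's built-in chat
# template, which renders the [INST] tags for the whole history; the helper
# build_chat_prompt is hypothetical, not part of the app above:

def build_chat_prompt(tokenizer, messages):
    """Render a list of {"role": ..., "content": ...} dicts into one prompt
    string using the model's chat template."""
    return tokenizer.apply_chat_template(
        messages,                    # full history, e.g. st.session_state.messages
        tokenize=False,              # return a string rather than token IDs
        add_generation_prompt=True,  # append the marker for the assistant turn
    )

# Inside the app, formatted_prompt = build_chat_prompt(tokenizer, st.session_state.messages)
# could replace the manual f-string; load_model would then also need to return
# the tokenizer alongside the pipeline.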