[Embedded Hugging Face Space hosting the live demo (shown as sleeping at the time of capture).]
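The full app is a single Streamlit script. Assuming it is saved as app.py and that streamlit, transformers, torch, and accelerate (required when device_map="auto" is used) are installed, it can be launched locally with: streamlit run app.py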
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
import torch

# Load the model and tokenizer
model_name = "Qwen/Qwen2.5-1.5B-Instruct"
model = AutoModelForCausalLM.from_pretrained(
    model_name,
    torch_dtype="auto",
    device_map="auto"
)
tokenizer = AutoTokenizer.from_pretrained(model_name)

# Initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = [
        {"role": "system", "content": "You are a helpful assistant."}
    ]

# Display chat messages from history on app rerun
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

# Accept user input
if prompt := st.chat_input("Ask me anything about data structures in LeetCode"):
    # Add user message to chat history
    st.session_state.messages.append({"role": "user", "content": prompt})
    # Display user message in chat message container
    with st.chat_message("user"):
        st.markdown(prompt)

    # Prepare the chat messages for the model
    messages = st.session_state.messages[-10:]  # limit messages to last 10 for performance
    text = tokenizer.apply_chat_template(
        messages,
        tokenize=False,
        add_generation_prompt=True
    )
    model_inputs = tokenizer([text], return_tensors="pt").to(model.device)

    # Generate response from the model
    generated_ids = model.generate(
        **model_inputs,
        max_new_tokens=512
    )
    generated_ids = [
        output_ids[len(input_ids):] for input_ids, output_ids in zip(model_inputs.input_ids, generated_ids)
    ]

    # Decode the response
    response = tokenizer.batch_decode(generated_ids, skip_special_tokens=True)[0]

    # Add bot response to chat history
    st.session_state.messages.append({"role": "assistant", "content": response})
    # Display bot response in chat message container
    with st.chat_message("assistant"):
        st.markdown(response)
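One caveat with this layout: Streamlit re-runs the entire script on every user interaction, so the model and tokenizer above are reloaded on each rerun. A common remedy is to wrap the loading step in st.cache_resource so the heavyweight objects are created once per server process and reused across reruns and sessions. The sketch below shows that variant for the same model; the load_model helper is an illustrative name, not part of the original app.

import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer

# Cache the model and tokenizer so they are loaded once per process,
# rather than on every Streamlit rerun.
@st.cache_resource
def load_model(model_name: str = "Qwen/Qwen2.5-1.5B-Instruct"):
    model = AutoModelForCausalLM.from_pretrained(
        model_name,
        torch_dtype="auto",
        device_map="auto"
    )
    tokenizer = AutoTokenizer.from_pretrained(model_name)
    return model, tokenizer

model, tokenizer = load_model()

With this change the first request still pays the full load time, but subsequent interactions reuse the cached objects, which noticeably reduces per-message latency.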