Spaces:
Sleeping
Sleeping
File size: 1,980 Bytes
96bc16a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 |
import streamlit as st
from langchain_huggingface import HuggingFaceEndpoint
from langchain_core.prompts import PromptTemplate
import os
# Set up your Hugging Face API token.
# NOTE(review): requires HF_TOKEN to be defined in Streamlit secrets
# (.streamlit/secrets.toml or the hosting platform's secrets UI);
# st.secrets raises if the key is missing.
os.environ["HUGGINGFACEHUB_API_TOKEN"] = st.secrets["HF_TOKEN"]
# Define the models: display label (shown in the selectbox) -> HF Hub repo id.
models: dict[str, str] = {
    "Mistral-7B-Instruct-v0.2": "mistralai/Mistral-7B-Instruct-v0.2",
    "Mistral-7B-Instruct-v0.3": "mistralai/Mistral-7B-Instruct-v0.3",
    "GPT-2": "gpt2",
    "BLOOM": "bigscience/bloom",
    "OPT": "facebook/opt-350m"
}
# Initialize session state.
# st.session_state persists across Streamlit script reruns; "messages" holds
# the chat history as dicts with "role" ("user"/"assistant") and "content".
if 'messages' not in st.session_state:
    st.session_state.messages = []
# Streamlit app page title.
st.title("Multi-Model LLM Chat")
# Model selection: user picks one of the display labels defined in `models`.
selected_model = st.selectbox("Choose a model", list(models.keys()))
# User input: free-text message; empty string until the user types something.
user_input = st.text_input("Your message:")
# Initialize LLM
@st.cache_resource
def get_llm(model_name: str):
    """Build (and cache) a HuggingFaceEndpoint client for a model display name.

    Args:
        model_name: A key of the module-level ``models`` dict.

    Returns:
        A ``HuggingFaceEndpoint`` LLM. ``st.cache_resource`` caches one
        client per distinct ``model_name`` across reruns and sessions.
    """
    return HuggingFaceEndpoint(
        repo_id=models[model_name],
        # BUG FIX: the generation parameter is `max_new_tokens`;
        # `max_length` is not a recognized HuggingFaceEndpoint field.
        max_new_tokens=128,
        temperature=0.7,
    )
# LLM instance for the currently selected model (cached by get_llm).
llm = get_llm(selected_model)

# Chat prompt template; the {human_input} variable is inferred from the
# template string, so the contract matches an explicit input_variables list.
prompt = PromptTemplate.from_template(
    "Human: {human_input}\n\nAssistant: Let's think about this step-by-step:"
)
# Generate response: runs only when the text input is non-empty.
if user_input:
    # Add user message to chat history (persists across reruns).
    st.session_state.messages.append({"role": "user", "content": user_input})
    # Generate LLM response; the spinner shows while the endpoint call blocks.
    with st.spinner("Generating response..."):
        # Render the prompt template, then call the endpoint synchronously.
        full_prompt = prompt.format(human_input=user_input)
        response = llm.invoke(full_prompt)
    # Add assistant response to chat history.
    st.session_state.messages.append({"role": "assistant", "content": response})
# Display chat history: one chat bubble per stored message.
for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.write(message["content"])

# Clear chat button: wipe history and force an immediate rerun so the
# emptied state is rendered right away.
if st.button("Clear Chat"):
    st.session_state.messages = []
    # BUG FIX: st.experimental_rerun() was deprecated and removed in
    # Streamlit 1.27+; st.rerun() is the supported replacement.
    st.rerun()
|