import os

import streamlit as st
from langchain.llms import HuggingFaceHub  # moved to langchain_community/langchain_huggingface in newer LangChain releases
from langchain.chains import ConversationChain
from langchain.memory import ConversationBufferMemory

# Hugging Face API token, read from the environment
HUGGINGFACEHUB_API_TOKEN = os.getenv("HUGGINGFACEHUB_API_TOKEN")
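# Set the token in the shell before launching, e.g.:
#   export HUGGINGFACEHUB_API_TOKEN="hf_..."  # "hf_..." is a placeholder, use your own token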

# Model to use: Mixtral 8x7B Instruct, served via the Hugging Face Inference API
MODEL_REPO = "mistralai/Mixtral-8x7B-Instruct-v0.1"

# Set up the LLM, memory, and conversation chain once and cache them.
# Streamlit reruns this script from the top on every interaction, so
# building ConversationBufferMemory at module level would wipe the chat
# history on every rerun; st.cache_resource keeps one chain alive instead.
# (Fine for a single-user demo; a multi-user app would keep the memory in
# st.session_state so histories are not shared across sessions.)
@st.cache_resource
def get_conversation() -> ConversationChain:
    # LLM served through LangChain + the Hugging Face Inference API
    llm = HuggingFaceHub(
        repo_id=MODEL_REPO,
        model_kwargs={"temperature": 0.7, "max_new_tokens": 2000},
        huggingfacehub_api_token=HUGGINGFACEHUB_API_TOKEN,
    )
    # Memory that feeds the accumulated chat history back into each prompt
    memory = ConversationBufferMemory()
    return ConversationChain(llm=llm, memory=memory, verbose=False)
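
# The chain also works without the UI; a minimal sketch (outside
# `streamlit run`, get_conversation() simply executes uncached):
#   chain = get_conversation()
#   print(chain.predict(input="Hello!"))  # predict() returns the reply text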

# Streamlit app
st.set_page_config(page_title="Mixtral LLM (LangChain API)", page_icon="🤖")
st.title("🤖 Mixtral Chatbot via LangChain (API)")

conversation = get_conversation()
user_input = st.text_input("You:", "")

if user_input:
    response = conversation.predict(input=user_input)
    st.markdown(f"**🤖 Mixtral:** {response}")
    print(response)  # also echo the reply to the server console for debugging
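
# To try this locally, a minimal sketch (the filename app.py is an
# assumption; it is not stated anywhere in this script):
#   pip install streamlit langchain huggingface_hub
#   streamlit run app.py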