import streamlit as st
import os
from dotenv import load_dotenv
# import pinecone
from pinecone import Pinecone
# import langchain
from langchain_pinecone import PineconeVectorStore
from langchain_groq import ChatGroq
from langchain_community.embeddings import HuggingFaceEmbeddings
from langchain_core.messages import HumanMessage, SystemMessage, AIMessage
# from langchain_huggingface import HuggingFaceEmbeddings

load_dotenv()
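# the .env file is expected to provide PINECONE_API_KEY, PINECONE_INDEX_NAME, and GROQ_API_KEY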
st.title("Groq Chatbot with Pinecone (Free Embeddings)")

# initialize Pinecone database
pc = Pinecone(api_key=os.environ.get("PINECONE_API_KEY"))
index_name = os.environ.get("PINECONE_INDEX_NAME")
index = pc.Index(index_name)

# initialize embeddings model (local + free)
embeddings = HuggingFaceEmbeddings(
    model_name="sentence-transformers/all-mpnet-base-v2"
)
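# note: all-mpnet-base-v2 produces 768-dimensional vectors, so the Pinecone index must be created with dimension 768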
# create Pinecone vector store
vector_store = PineconeVectorStore(index=index, embedding=embeddings)
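# documents are assumed to be indexed already; for a quick test they could be
# added with, e.g., vector_store.add_texts(["some text chunk", "another chunk"])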
# initialize chat history
if "messages" not in st.session_state:
    st.session_state.messages = []
    st.session_state.messages.append(SystemMessage("You are a helpful assistant for answering user questions."))
# display chat messages (skip the system prompt; it is not part of the visible conversation)
for message in st.session_state.messages:
    if isinstance(message, SystemMessage):
        continue
    role = "user" if isinstance(message, HumanMessage) else "assistant"
    with st.chat_message(role):
        st.markdown(message.content)
# chat input
prompt = st.chat_input("Ask me something...")

if prompt:
    with st.chat_message("user"):
        st.markdown(prompt)
    st.session_state.messages.append(HumanMessage(prompt))
    # initialize Groq LLM
    llm = ChatGroq(
        api_key=os.environ.get("GROQ_API_KEY"),
        model="llama-3.3-70b-versatile",
        temperature=0.7
    )
    # retrieve similar documents
    retriever = vector_store.as_retriever(
        search_type="similarity_score_threshold",
        search_kwargs={"k": 3, "score_threshold": 0.5},
    )
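    # keeps at most the top 3 matches whose relevance score is at least 0.5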
    docs = retriever.invoke(prompt)
    # join the retrieved chunks, separated so they stay readable in the prompt
    docs_text = "\n\n".join(d.page_content for d in docs)
    # build system prompt
    system_prompt = f"""
    You are an assistant for question-answering tasks.
    Use the following context to answer the question.
    If you don't know the answer, say you don't know.
    Keep your answer concise and helpful.
    Context: {docs_text}
    """
    # st.session_state.messages.append(SystemMessage(system_prompt))
    # # generate answer using Groq Cloud
    # result = llm.invoke(st.session_state.messages).content

    # build temporary message list for this query
    messages = [SystemMessage(system_prompt)] + st.session_state.messages

    # generate answer using Groq Cloud
    result = llm.invoke(messages).content
    # display answer
    with st.chat_message("assistant"):
        st.markdown(result)
    st.session_state.messages.append(AIMessage(result))
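# run locally with `streamlit run app.py` (assuming this file is saved as app.py)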