Runtime error
Updated app.py with embeddings = FastEmbedEmbeddings and ChatGroq() instead of ChatOpenAI()
New requirements.txt:
...
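The contents of the new requirements.txt are elided in this view. A plausible minimal set for this change (an assumption, not the actual file) would be:

# Assumed contents -- the commit only shows "..." for requirements.txt
langchain
langchain-community   # provides FastEmbedEmbeddings
langchain-groq        # provides ChatGroq
fastembed             # backend required by FastEmbedEmbeddings
chromadb
pypdf
chainlit
python-dotenv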
app.py CHANGED
@@ -1,14 +1,18 @@
 import os
 from typing import List
 
-from langchain.embeddings.openai import OpenAIEmbeddings
+# from langchain.embeddings.openai import OpenAIEmbeddings # ORIGINAL
+from langchain_community.embeddings import FastEmbedEmbeddings # JB
+
 from langchain.text_splitter import RecursiveCharacterTextSplitter
 from langchain.vectorstores import Chroma
 from langchain.chains import (
     ConversationalRetrievalChain,
 )
 from langchain.document_loaders import PyPDFLoader
-from langchain.chat_models import ChatOpenAI
+# from langchain.chat_models import ChatOpenAI # ORIGINAL
+from langchain_groq import ChatGroq # JB
+
 from langchain.prompts.chat import (
     ChatPromptTemplate,
     SystemMessagePromptTemplate,
@@ -20,6 +24,14 @@ from chainlit.types import AskFileResponse
 
 import chainlit as cl
 
+# JB
+from dotenv import load_dotenv
+import glob
+load_dotenv() #
+groq_api_key = os.environ['GROQ_API_KEY']
+# groq_api_key = "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" # os.environ['GROQ_API_KEY']
+print"groq_api_key: ", groq_api_key)
+
 text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)
 
 system_template = """Use the following pieces of context to answer the users question.
@@ -88,7 +100,8 @@ async def on_chat_start():
     metadatas = [{"source": f"{i}-pl"} for i in range(len(texts))]
 
     # Create a Chroma vector store
-    embeddings = OpenAIEmbeddings()
+    # embeddings = OpenAIEmbeddings() # ORIGINAL
+    embeddings = FastEmbedEmbeddings # JB
     docsearch = await cl.make_async(Chroma.from_texts)(
         texts, embeddings, metadatas=metadatas
     )
@@ -102,9 +115,16 @@ async def on_chat_start():
         return_messages=True,
     )
 
+
+
+    # JB
+    # llm = ChatGroq(temperature=0.2, groq_api_key=groq_api_key, model_name='mixtral-8x7b-32768')
+
+
     # Create a chain that uses the Chroma vector store
     chain = ConversationalRetrievalChain.from_llm(
-        ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True),
+        # ChatOpenAI(model_name="gpt-3.5-turbo", temperature=0, streaming=True), # ORIGINAL
+        ChatGroq(temperature=0.2, groq_api_key=groq_api_key, model_name='mixtral-8x7b-32768', streaming=True), # JB
         chain_type="stuff",
         retriever=docsearch.as_retriever(),
         memory=memory,
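The Space's "Runtime error" status is consistent with two bugs visible in the new code. A minimal sketch of the likely fixes, assuming the same imports and environment as app.py (not part of this commit):

import os
from langchain_community.embeddings import FastEmbedEmbeddings

groq_api_key = os.environ['GROQ_API_KEY']

# Fix 1: the committed line `print"groq_api_key: ", groq_api_key)` is missing
# the opening parenthesis of the print call, a SyntaxError that prevents the
# module from loading at all.
print("groq_api_key: ", groq_api_key)  # better: don't print secrets at all

# Fix 2: the committed line `embeddings = FastEmbedEmbeddings` binds the class
# itself; Chroma.from_texts expects an embeddings *instance*, so call it:
embeddings = FastEmbedEmbeddings()

On Spaces, GROQ_API_KEY should be set as a repository secret rather than hardcoded, and the literal key left in the commented-out line above should be revoked.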