Spaces:
Sleeping
Sleeping
Switched to 3.5-turbo; updated db directory
Browse files
app.py
CHANGED
|
@@ -18,7 +18,7 @@ from langchain.schema import AIMessage, HumanMessage, SystemMessage
|
|
| 18 |
embeddings = OpenAIEmbeddings()
|
| 19 |
|
| 20 |
# Loads database from persisted directory
|
| 21 |
-
db_directory = "chroma_db"
|
| 22 |
db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
|
| 23 |
|
| 24 |
# This is code that retrieves relevant documents based on a similarity search (in this case, it grabs the top 2 relevant documents or chunks)
|
|
@@ -30,7 +30,7 @@ with open('system_prompt.txt', 'r') as file:
|
|
| 30 |
openai.api_key = os.getenv("OPENAI_API_KEY")
|
| 31 |
|
| 32 |
#chat = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0) # Faster for experiments
|
| 33 |
-
chat = ChatOpenAI(model_name="gpt-4",temperature=0)
|
| 34 |
|
| 35 |
# Make sure we don't exceed estimation of token limit:
|
| 36 |
TOKEN_LIMIT = 4096 # GPT-3.5 Turbo token limit
|
|
|
|
| 18 |
embeddings = OpenAIEmbeddings()
|
| 19 |
|
| 20 |
# Loads database from persisted directory
|
| 21 |
+
db_directory = "chroma_db_v2"
|
| 22 |
db = Chroma(persist_directory=db_directory, embedding_function=embeddings)
|
| 23 |
|
| 24 |
# This is code that retrieves relevant documents based on a similarity search (in this case, it grabs the top 2 relevant documents or chunks)
|
|
|
|
| 30 |
openai.api_key = os.getenv("OPENAI_API_KEY")
|
| 31 |
|
| 32 |
#chat = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0) # Faster for experiments
|
| 33 |
+
chat = ChatOpenAI(model_name="gpt-3.5-turbo",temperature=0)
|
| 34 |
|
| 35 |
# Make sure we don't exceed estimation of token limit:
|
| 36 |
TOKEN_LIMIT = 4096 # GPT-3.5 Turbo token limit
|