Spaces:
Sleeping
Sleeping
Commit
·
ac4679c
1
Parent(s):
1960f7b
Updated app.py to use Ollama instead of GPT-4o
Browse files
app.py
CHANGED
|
@@ -14,6 +14,7 @@ from langchain.embeddings import HuggingFaceEmbeddings
|
|
| 14 |
from langchain_chroma import Chroma
|
| 15 |
from langchain.memory import ConversationBufferMemory
|
| 16 |
from langchain.chains import ConversationalRetrievalChain
|
|
|
|
| 17 |
import numpy as np
|
| 18 |
from sklearn.manifold import TSNE
|
| 19 |
import plotly.graph_objects as go
|
|
@@ -32,6 +33,7 @@ os.environ['OPENAI_API_KEY'] = os.getenv('OPENAI_API_KEY')
|
|
| 32 |
|
| 33 |
folder = "my-knowledge-base/"
|
| 34 |
db_name = "vectorstore_db"
|
|
|
|
| 35 |
|
| 36 |
def process_files(files):
|
| 37 |
os.makedirs(folder, exist_ok=True)
|
|
@@ -73,7 +75,7 @@ def process_files(files):
|
|
| 73 |
collection = vectorstore._collection
|
| 74 |
result = collection.get(include=['embeddings', 'documents', 'metadatas'])
|
| 75 |
|
| 76 |
-
llm =
|
| 77 |
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
|
| 78 |
retriever = vectorstore.as_retriever(search_kwargs={"k": 35})
|
| 79 |
global conversation_chain
|
|
|
|
| 14 |
from langchain_chroma import Chroma
|
| 15 |
from langchain.memory import ConversationBufferMemory
|
| 16 |
from langchain.chains import ConversationalRetrievalChain
|
| 17 |
+
from langchain_ollama import ChatOllama
|
| 18 |
import numpy as np
|
| 19 |
from sklearn.manifold import TSNE
|
| 20 |
import plotly.graph_objects as go
|
|
|
|
| 33 |
|
| 34 |
folder = "my-knowledge-base/"
|
| 35 |
db_name = "vectorstore_db"
|
| 36 |
+
MODEL = "llama3.2:latest"
|
| 37 |
|
| 38 |
def process_files(files):
|
| 39 |
os.makedirs(folder, exist_ok=True)
|
|
|
|
| 75 |
collection = vectorstore._collection
|
| 76 |
result = collection.get(include=['embeddings', 'documents', 'metadatas'])
|
| 77 |
|
| 78 |
+
llm = ChatOllama(temperature=0.7, model=MODEL)
|
| 79 |
memory = ConversationBufferMemory(memory_key='chat_history', return_messages=True)
|
| 80 |
retriever = vectorstore.as_retriever(search_kwargs={"k": 35})
|
| 81 |
global conversation_chain
|