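"""Shared client initialization for the RAG pipeline.

Sets up the OpenAI-compatible embedding and chat models, the Chroma vector
store and retriever, LlamaIndex defaults, a Groq client for Llama Guard
safety checks, and a mem0 memory client, all driven by values from config.
"""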
import chromadb
from langchain_openai import AzureOpenAIEmbeddings, AzureChatOpenAI, OpenAIEmbeddings, ChatOpenAI
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from llama_index.core import Settings
from groq import Groq
from mem0 import MemoryClient
import sys
import os
# Add config to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'config'))
from config import config
# Initialize embedding function for Chroma
embedding_function = chromadb.utils.embedding_functions.OpenAIEmbeddingFunction(
    api_base=config.OPENAI_API_BASE,
    api_key=config.API_KEY,
    model_name=config.EMBEDDING_MODEL
)
# Initialize OpenAI Embeddings
embedding_model = OpenAIEmbeddings(
    openai_api_base=config.OPENAI_API_BASE,
    openai_api_key=config.API_KEY,
    model=config.EMBEDDING_MODEL
)
# Initialize Chat OpenAI model
llm = ChatOpenAI(
    openai_api_base=config.OPENAI_API_BASE,
    openai_api_key=config.API_KEY,
    model=config.CHAT_MODEL,
    streaming=False
)
# Set LlamaIndex settings
Settings.llm = llm
Settings.embed_model = embedding_model
# Initialize vector store
vector_store = Chroma(
    collection_name=config.COLLECTION_NAME,
    persist_directory=config.PERSIST_DIRECTORY,
    embedding_function=embedding_model
)
# Create retriever
retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={'k': config.RETRIEVAL_K}
)
# Initialize Groq client for Llama Guard
llama_guard_client = Groq(api_key=config.GROQ_API_KEY)
# Initialize Memory client
memory_client = MemoryClient(api_key=config.MEM0_API_KEY)
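
# --- Usage sketch (not part of the original module) ---
# A minimal example of how these components might be wired together for a
# guarded, memory-aware RAG query. The prompt text, the Llama Guard model
# name ("llama-guard-3-8b"), and the mem0 user_id are assumptions made for
# illustration; the project's actual chat flow lives elsewhere.
if __name__ == "__main__":
    question = "What does the knowledge base say about refund policies?"

    # 1. Safety check via the Groq-hosted Llama Guard model (model name assumed).
    guard_response = llama_guard_client.chat.completions.create(
        model="llama-guard-3-8b",
        messages=[{"role": "user", "content": question}],
    )
    print("Llama Guard verdict:", guard_response.choices[0].message.content)

    # 2. Retrieve the top-k relevant chunks from Chroma.
    docs = retriever.invoke(question)
    context = "\n\n".join(doc.page_content for doc in docs)

    # 3. Answer with the chat model, grounded in the retrieved context.
    prompt = ChatPromptTemplate.from_template(
        "Answer the question using only the context below.\n\n"
        "Context:\n{context}\n\nQuestion: {question}"
    )
    chain = prompt | llm | StrOutputParser()
    answer = chain.invoke({"context": context, "question": question})
    print("Answer:", answer)

    # 4. Persist the exchange to mem0 (user_id is a placeholder).
    memory_client.add(
        [{"role": "user", "content": question},
         {"role": "assistant", "content": answer}],
        user_id="demo-user",
    )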