import os
import sys

import chromadb
from chromadb.utils import embedding_functions
from groq import Groq
from langchain_community.vectorstores import Chroma
from langchain_core.output_parsers import StrOutputParser
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import AzureChatOpenAI, AzureOpenAIEmbeddings, ChatOpenAI, OpenAIEmbeddings
from llama_index.core import Settings
from mem0 import MemoryClient

# Add config to path
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'config'))
from config import config

# Initialize embedding function for Chroma
embedding_function = embedding_functions.OpenAIEmbeddingFunction(
    api_base=config.OPENAI_API_BASE,
    api_key=config.API_KEY,
    model_name=config.EMBEDDING_MODEL
)
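
# Usage sketch (not executed here): this embedding function is the one you would
# attach to a raw chromadb collection. The client/collection calls below are an
# assumption about how it is wired up elsewhere; the LangChain `vector_store`
# further down manages its own collection.
#
#     chroma_client = chromadb.PersistentClient(path=config.PERSIST_DIRECTORY)
#     collection = chroma_client.get_or_create_collection(
#         name=config.COLLECTION_NAME,
#         embedding_function=embedding_function,
#     )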

# Initialize OpenAI Embeddings
embedding_model = OpenAIEmbeddings(
    openai_api_base=config.OPENAI_API_BASE,
    openai_api_key=config.API_KEY,
    model=config.EMBEDDING_MODEL
)
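
# Usage sketch (not executed here): embed a single query string; the vector
# length depends on config.EMBEDDING_MODEL (e.g. 1536 for text-embedding-3-small).
#
#     query_vector = embedding_model.embed_query("What is in the knowledge base?")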

# Initialize Chat OpenAI model
llm = ChatOpenAI(
    openai_api_base=config.OPENAI_API_BASE,
    openai_api_key=config.API_KEY,
    model=config.CHAT_MODEL,
    streaming=False
)
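
# Usage sketch (not executed here): the ChatPromptTemplate / StrOutputParser
# imports above are typically composed with this llm into an LCEL chain. The
# prompt wording is illustrative, not taken from this project.
#
#     prompt = ChatPromptTemplate.from_template(
#         "Answer using only this context:\n{context}\n\nQuestion: {question}"
#     )
#     rag_chain = prompt | llm | StrOutputParser()
#     answer = rag_chain.invoke({"context": "...", "question": "..."})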

# Set LlamaIndex settings. Note: the attribute is Settings.embed_model (there is
# no Settings.embedding). LlamaIndex can wrap these LangChain objects, assuming
# its LangChain integration package is installed.
Settings.llm = llm
Settings.embed_model = embedding_model

# Initialize vector store
vector_store = Chroma(
    collection_name=config.COLLECTION_NAME,
    persist_directory=config.PERSIST_DIRECTORY,
    embedding_function=embedding_model
)
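
# Usage sketch (not executed here): documents are presumably ingested elsewhere,
# but texts can be added to the persisted collection like this. The text and
# metadata shown are placeholders.
#
#     vector_store.add_texts(
#         ["Example document text."],
#         metadatas=[{"source": "example.txt"}],
#     )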

# Create retriever
retriever = vector_store.as_retriever(
    search_type='similarity',
    search_kwargs={'k': config.RETRIEVAL_K}
)
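
# Usage sketch (not executed here): the retriever returns the top-k most similar
# documents for a query string; the query below is illustrative.
#
#     docs = retriever.invoke("What does the knowledge base say about X?")
#     context = "\n\n".join(doc.page_content for doc in docs)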

# Initialize Groq client for Llama Guard
llama_guard_client = Groq(api_key=config.GROQ_API_KEY)
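
# Usage sketch (not executed here): Llama Guard is called through Groq's
# OpenAI-style chat completions API and replies with "safe" or "unsafe" plus a
# category code. The model id below is an assumption; check Groq's model list.
#
#     moderation = llama_guard_client.chat.completions.create(
#         model="meta-llama/llama-guard-4-12b",  # assumed model id
#         messages=[{"role": "user", "content": "user input to screen"}],
#     )
#     verdict = moderation.choices[0].message.content  # e.g. "safe" or "unsafe\nS1"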

# Initialize Memory client
memory_client = MemoryClient(api_key=config.MEM0_API_KEY)
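
# Usage sketch (not executed here): the Mem0 client stores conversation turns
# and retrieves relevant memories per user. The user_id and messages are
# placeholders.
#
#     memory_client.add(
#         [{"role": "user", "content": "I prefer short answers."}],
#         user_id="demo-user",
#     )
#     memories = memory_client.search("answer style preference", user_id="demo-user")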