Update tool/rag.py
Browse files — tool/rag.py (+5, −4)
tool/rag.py
CHANGED
|
@@ -30,8 +30,8 @@ from langchain_community.vectorstores import FAISS
|
|
| 30 |
from torch import cuda, bfloat16
|
| 31 |
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
|
| 32 |
from langchain_openai import OpenAIEmbeddings
|
| 33 |
-
embeddings = OpenAIEmbeddings(api_key=
|
| 34 |
-
|
| 35 |
vectorstore=FAISS.load_local(r"rag", embeddings,allow_dangerous_deserialization =True)
|
| 36 |
|
| 37 |
|
|
@@ -67,8 +67,9 @@ class rag(BaseTool):
|
|
| 67 |
|
| 68 |
def __init__(self, path: str = None):
|
| 69 |
super().__init__( )
|
| 70 |
-
self.llm = ChatOpenAI(model="gpt-4o-2024-11-20",api_key=
|
| 71 |
-
base_url=
|
|
|
|
| 72 |
self.path = path
|
| 73 |
# api keys
|
| 74 |
|
|
|
|
| 30 |
from torch import cuda, bfloat16
|
| 31 |
device = f'cuda:{cuda.current_device()}' if cuda.is_available() else 'cpu'
|
| 32 |
from langchain_openai import OpenAIEmbeddings
|
| 33 |
+
embeddings = OpenAIEmbeddings(api_key=os.getenv("OPENAI_API_KEY"),
|
| 34 |
+
base_url=os.getenv("OPENAI_API_BASE"))
|
| 35 |
vectorstore=FAISS.load_local(r"rag", embeddings,allow_dangerous_deserialization =True)
|
| 36 |
|
| 37 |
|
|
|
|
| 67 |
|
| 68 |
def __init__(self, path: str = None):
|
| 69 |
super().__init__( )
|
| 70 |
+
self.llm = ChatOpenAI(model="gpt-4o-2024-11-20",api_key=os.getenv("OPENAI_API_KEY"),
|
| 71 |
+
base_url=os.getenv("OPENAI_API_BASE")
|
| 72 |
+
)
|
| 73 |
self.path = path
|
| 74 |
# api keys
|
| 75 |
|