Spaces:
Sleeping
Sleeping
Ilyas KHIAT
committed on
Commit
·
bcc9af3
1
Parent(s):
8135e56
what if
Browse files
rag.py
CHANGED
|
@@ -103,7 +103,7 @@ def get_random_chunk(scene_specific = [2]) : # scene_specific = None signifie qu
|
|
| 103 |
return chunks[random.randint(0, len(chunks) - 1)],scene_specific
|
| 104 |
|
| 105 |
|
| 106 |
-
def get_vectorstore() -> FAISS:
|
| 107 |
index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
|
| 108 |
vector_store = FAISS(
|
| 109 |
embedding_function=embedding,
|
|
@@ -116,7 +116,8 @@ def get_vectorstore() -> FAISS:
|
|
| 116 |
vector_store.add_documents(documents=documents, ids=uuids)
|
| 117 |
return vector_store
|
| 118 |
|
| 119 |
-
vectore_store = get_vectorstore()
|
|
|
|
| 120 |
|
| 121 |
|
| 122 |
def generate_sphinx_response() -> sphinx_output:
|
|
@@ -148,6 +149,10 @@ def retrieve_context_from_vectorestore(query:str) -> str:
|
|
| 148 |
retriever = vectore_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
|
| 149 |
return retriever.invoke(query)
|
| 150 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 151 |
|
| 152 |
def generate_stream(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 300, temperature = 1,index_name="",stream=True,vector_store=None):
|
| 153 |
try:
|
|
@@ -179,7 +184,7 @@ def generate_whatif_stream(question:str,response:str, stream:bool = False) -> st
|
|
| 179 |
prompt = PromptTemplate.from_template(template_whatif)
|
| 180 |
llm_chain = prompt | llm | StrOutputParser()
|
| 181 |
print("Enter whatif")
|
| 182 |
-
context =
|
| 183 |
print(f"Context: {context}")
|
| 184 |
|
| 185 |
if stream:
|
|
|
|
| 103 |
return chunks[random.randint(0, len(chunks) - 1)],scene_specific
|
| 104 |
|
| 105 |
|
| 106 |
+
def get_vectorstore(chunks) -> FAISS:
|
| 107 |
index = faiss.IndexFlatL2(len(embedding.embed_query("hello world")))
|
| 108 |
vector_store = FAISS(
|
| 109 |
embedding_function=embedding,
|
|
|
|
| 116 |
vector_store.add_documents(documents=documents, ids=uuids)
|
| 117 |
return vector_store
|
| 118 |
|
| 119 |
+
vectore_store = get_vectorstore(chunks)
|
| 120 |
+
scenes_vectore_store = get_vectorstore(scenes)
|
| 121 |
|
| 122 |
|
| 123 |
def generate_sphinx_response() -> sphinx_output:
|
|
|
|
| 149 |
retriever = vectore_store.as_retriever(search_type="mmr", search_kwargs={"k": 3})
|
| 150 |
return retriever.invoke(query)
|
| 151 |
|
| 152 |
+
def retrieve_context_from_scenes(query:str) -> str:
|
| 153 |
+
retriever = scenes_vectore_store.as_retriever(search_kwargs={"k": 1})
|
| 154 |
+
return retriever.invoke(query)
|
| 155 |
+
|
| 156 |
|
| 157 |
def generate_stream(query:str,messages = [], model = "gpt-4o-mini", max_tokens = 300, temperature = 1,index_name="",stream=True,vector_store=None):
|
| 158 |
try:
|
|
|
|
| 184 |
prompt = PromptTemplate.from_template(template_whatif)
|
| 185 |
llm_chain = prompt | llm | StrOutputParser()
|
| 186 |
print("Enter whatif")
|
| 187 |
+
context = retrieve_context_from_scenes(f"question: {question} . reponse : {response}")
|
| 188 |
print(f"Context: {context}")
|
| 189 |
|
| 190 |
if stream:
|