coolgandhi committed on
Commit
6e9f8d9
·
1 Parent(s): 38768ad

updating rag

Browse files
Files changed (1) hide show
  1. rag.py +4 -3
rag.py CHANGED
@@ -31,7 +31,8 @@ class RAGModel:
31
  # Create embeddings
32
  self.embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_key=openai_api_key)
33
  text_documents = [str(doc) for doc in chunked_documents]
34
-
 
35
  # Create cache-backed embeddings
36
  self.store = LocalFileStore("./cache/")
37
  self.embedder = CacheBackedEmbeddings.from_bytes_store(
@@ -72,7 +73,7 @@ class RAGModel:
72
  # Format prompt
73
  prompt = self.prompt_template.format(context=context, question=question)
74
 
75
- # print(context)
76
  # Get response from chat model
77
  # response = self.chat_model(prompt)
78
  # Parse response
@@ -80,7 +81,7 @@ class RAGModel:
80
 
81
  # chain = prompt=prompt | self.chat_model | parser=self.parser
82
  # result = chain.invoke()
83
- dict_context = {"question": question}
84
  #chain = ({"context": context,"question":Runnab
85
  chain =({"context": lambda x: context,"question": RunnablePassthrough()}
86
  | self.prompt_template
 
31
  # Create embeddings
32
  self.embeddings = OpenAIEmbeddings(model="text-embedding-ada-002", openai_api_key=openai_api_key)
33
  text_documents = [str(doc) for doc in chunked_documents]
34
+ print(text_documents)
35
+
36
  # Create cache-backed embeddings
37
  self.store = LocalFileStore("./cache/")
38
  self.embedder = CacheBackedEmbeddings.from_bytes_store(
 
73
  # Format prompt
74
  prompt = self.prompt_template.format(context=context, question=question)
75
 
76
+ print(context)
77
  # Get response from chat model
78
  # response = self.chat_model(prompt)
79
  # Parse response
 
81
 
82
  # chain = prompt=prompt | self.chat_model | parser=self.parser
83
  # result = chain.invoke()
84
+ # dict_context = {"question": question}
85
  #chain = ({"context": context,"question":Runnab
86
  chain =({"context": lambda x: context,"question": RunnablePassthrough()}
87
  | self.prompt_template