whymath committed on
Commit
a9b778f
·
1 Parent(s): 7291483

Reverting to the previous pipeline to avoid the "'dict' object has no attribute 'invoke'" error

Browse files
Files changed (1) hide show
  1. utils.py +20 -20
utils.py CHANGED
@@ -30,21 +30,21 @@ def chunk_documents(docs, tiktoken_len):
30
 
31
  def create_raqa_chain_from_docs():
32
 
33
- # # Load the documents from a PDF file using PyMuPDFLoader
34
- # docs = PyMuPDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001326801/c7318154-f6ae-4866-89fa-f0c589f2ee3d.pdf").load() # TODO: Update this to enable user to upload PDF
35
- # print("Loaded", len(docs), "documents")
36
- # print(docs[0])
37
 
38
- # # Create a Qdrant vector store from the split chunks and embedding model, and obtain its retriever
39
- # split_chunks = chunk_documents(docs, tiktoken_len)
40
- # embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
41
- # qdrant_vectorstore = Qdrant.from_documents(
42
- # split_chunks,
43
- # embedding_model,
44
- # location=":memory:",
45
- # collection_name="LoadedPDF",
46
- # )
47
- # qdrant_retriever = qdrant_vectorstore.as_retriever()
48
 
49
  # Define the RAG prompt template
50
  RAG_PROMPT = """
@@ -57,13 +57,13 @@ def create_raqa_chain_from_docs():
57
 
58
  # Create the retrieval augmented QA chain using the Qdrant retriever, RAG prompt, and OpenAI chat model
59
  openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
60
- # retrieval_augmented_qa_chain = (
61
- # {"context": itemgetter("question") | qdrant_retriever, "question": itemgetter("question")}
62
- # | RunnablePassthrough.assign(context=itemgetter("context"))
63
- # | {"response": rag_prompt | openai_chat_model, "context": itemgetter("context")}
64
- # )
65
  retrieval_augmented_qa_chain = (
66
- {"response": rag_prompt | openai_chat_model}
 
 
67
  )
 
 
 
68
 
69
  return retrieval_augmented_qa_chain
 
30
 
31
  def create_raqa_chain_from_docs():
32
 
33
+ # Load the documents from a PDF file using PyMuPDFLoader
34
+ docs = PyMuPDFLoader("https://d18rn0p25nwr6d.cloudfront.net/CIK-0001326801/c7318154-f6ae-4866-89fa-f0c589f2ee3d.pdf").load() # TODO: Update this to enable user to upload PDF
35
+ print("Loaded", len(docs), "documents")
36
+ print(docs[0])
37
 
38
+ # Create a Qdrant vector store from the split chunks and embedding model, and obtain its retriever
39
+ split_chunks = chunk_documents(docs, tiktoken_len)
40
+ embedding_model = OpenAIEmbeddings(model="text-embedding-3-small")
41
+ qdrant_vectorstore = Qdrant.from_documents(
42
+ split_chunks,
43
+ embedding_model,
44
+ location=":memory:",
45
+ collection_name="LoadedPDF",
46
+ )
47
+ qdrant_retriever = qdrant_vectorstore.as_retriever()
48
 
49
  # Define the RAG prompt template
50
  RAG_PROMPT = """
 
57
 
58
  # Create the retrieval augmented QA chain using the Qdrant retriever, RAG prompt, and OpenAI chat model
59
  openai_chat_model = ChatOpenAI(model="gpt-3.5-turbo")
 
 
 
 
 
60
  retrieval_augmented_qa_chain = (
61
+ {"context": itemgetter("question") | qdrant_retriever, "question": itemgetter("question")}
62
+ | RunnablePassthrough.assign(context=itemgetter("context"))
63
+ | {"response": rag_prompt | openai_chat_model, "context": itemgetter("context")}
64
  )
65
+ # retrieval_augmented_qa_chain = (
66
+ # {"response": rag_prompt | openai_chat_model}
67
+ # )
68
 
69
  return retrieval_augmented_qa_chain