import os
from typing import List
from chainlit.types import AskFileResponse
from aimakerspace.text_utils import CharacterTextSplitter, TextFileLoader
from aimakerspace.openai_utils.prompts import (
    UserRolePrompt,
    SystemRolePrompt,
    AssistantRolePrompt,
)
from aimakerspace.openai_utils.embedding import EmbeddingModel
from aimakerspace.vectordatabase import VectorDatabase
from aimakerspace.openai_utils.chatmodel import ChatOpenAI
import chainlit as cl
import pymupdf
# QUESTION #1:
# Why do we want to support streaming? What about streaming is important, or useful?
# ANSWER #1:
# Streaming makes the app feel responsive. Users start to perceive lag at
# roughly 200-300ms, while a full LLM completion can take several seconds, so
# showing tokens as they are generated beats waiting for the whole response.
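# A minimal sketch of the idea (not wired into the app): the consumer can act
# on each token as it arrives instead of blocking on the full response.
# `fake_llm_stream` is a hypothetical stand-in for an LLM client's async
# streaming API, such as `ChatOpenAI.astream` used below.
import asyncio

async def fake_llm_stream():
    for token in ["Streaming ", "feels ", "instant."]:
        await asyncio.sleep(0.3)  # simulate per-token generation latency
        yield token

async def demo_streaming():
    # First token is visible after ~0.3s, not after the full ~0.9s.
    async for token in fake_llm_stream():
        print(token, end="", flush=True)
# Try it with: asyncio.run(demo_streaming())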
# QUESTION #2:
# Why are we using User Session here? What about Python makes us need to use this? Why not just store everything in a global variable?
# ANSWER #2:
# Every connected user is served by the same Python process, so a module-level
# global would be shared by all of them: e.g. three concurrent users would
# overwrite each other's pipeline. cl.user_session scopes state to each user's
# session, which keeps the app correct and scalable under concurrency, and the
# per-user data separation also enables personalization, improving the overall
# user experience and response quality.
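# A minimal sketch (not part of the app) contrasting the two approaches.
# With a module-level global, every connection in this one process shares
# a single object:
#
#   chain = None  # global: user B's upload would clobber user A's chain
#
# cl.user_session keys state by session instead, so the same code is safe
# for concurrent users:
#
#   cl.user_session.set("chain", pipeline)   # stored per user
#   chain = cl.user_session.get("chain")     # each user gets their own back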
system_template = """\
Use the following context to extract and synthesize information to answer the user's question as accurately as possible.
Make sure that you think through each step.
If the answer is not found in the context:
1. Politely inform the user that the information is not available.
2. If possible, suggest where they might find more information or how they could rephrase their question for better clarity.
Always aim to provide clear and helpful responses."""
system_role_prompt = SystemRolePrompt(system_template)
user_prompt_template = """\
Context:
{context}
Question:
{question}
"""
user_role_prompt = UserRolePrompt(user_prompt_template)
class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    async def arun_pipeline(self, user_query: str):
        # Retrieve the k most similar chunks and concatenate them into the prompt context.
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        formatted_system_prompt = system_role_prompt.create_message()
        formatted_user_prompt = user_role_prompt.create_message(question=user_query, context=context_prompt)

        async def generate_response():
            async for chunk in self.llm.astream([formatted_system_prompt, formatted_user_prompt]):
                yield chunk

        return {"response": generate_response(), "context": context_list}
text_splitter = CharacterTextSplitter()
def process_text_file(file: AskFileResponse):
    import tempfile

    # Create a named temp file just to get a path, then write the uploaded
    # bytes to it so TextFileLoader can read from disk.
    with tempfile.NamedTemporaryFile(mode="w", delete=False, suffix=".txt") as temp_file:
        temp_file_path = temp_file.name

    with open(temp_file_path, "wb") as f:
        f.write(file.content)

    text_loader = TextFileLoader(temp_file_path)
    documents = text_loader.load_documents()
    texts = text_splitter.split_texts(documents)
    return texts
def process_pdf_file(file: AskFileResponse):
    import tempfile

    # Same temp-file trick as above, but for binary PDF content.
    with tempfile.NamedTemporaryFile(delete=False, suffix=".pdf") as temp_file:
        temp_file_path = temp_file.name

    with open(temp_file_path, "wb") as f:
        f.write(file.content)

    # Extract plain text page by page with PyMuPDF; each page's text becomes
    # one chunk (no further splitting for PDFs).
    doc = pymupdf.open(temp_file_path)
    texts = [page.get_text() for page in doc]
    # os.remove(temp_file_path)  # still evaluating whether to clean up here; see the sketch below
    return texts
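# A sketch of the cleanup idea from the comment above, assuming we are willing
# to pay the full extraction cost before returning: close the document and
# remove the temp file in a `finally` block so cleanup happens even if parsing
# raises.
#
#   doc = pymupdf.open(temp_file_path)
#   try:
#       texts = [page.get_text() for page in doc]
#   finally:
#       doc.close()
#       os.remove(temp_file_path)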
@cl.on_chat_start
async def on_chat_start():
    files = None

    # Wait for the user to upload a file
    while files is None:
        files = await cl.AskFileMessage(
            content="Please upload a Text or PDF file <2MB to begin!",
            accept=["text/plain", "application/pdf"],
            max_size_mb=2,
            timeout=180,
        ).send()

    file = files[0]

    msg = cl.Message(
        content=f"Processing `{file.name}`...", disable_human_feedback=True
    )
    await msg.send()

    # Load the file based on its type
    if file.type == "text/plain":
        texts = process_text_file(file)
    elif file.type == "application/pdf":
        texts = process_pdf_file(file)
    else:
        msg.content = "Unsupported file type. Please upload a .txt or .pdf file."
        await msg.update()
        return

    print(f"Processing {len(texts)} text chunks")

    # Build an in-memory (dict-backed) vector store from the chunks
    vector_db = VectorDatabase()
    vector_db = await vector_db.abuild_from_list(texts)

    chat_openai = ChatOpenAI()

    # Create the RAG chain
    retrieval_augmented_qa_pipeline = RetrievalAugmentedQAPipeline(
        vector_db_retriever=vector_db,
        llm=chat_openai
    )

    # Let the user know that the system is ready
    msg.content = f"Processing `{file.name}` done. You can now ask questions!"
    await msg.update()

    # Store the pipeline per user session (see ANSWER #2 above)
    cl.user_session.set("chain", retrieval_augmented_qa_pipeline)
@cl.on_message
async def main(message: cl.Message):
    chain = cl.user_session.get("chain")

    msg = cl.Message(content="")

    result = await chain.arun_pipeline(message.content)

    # Stream tokens to the UI as they arrive
    async for stream_resp in result["response"]:
        await msg.stream_token(stream_resp)

    await msg.send()