Update app.py
app.py CHANGED
@@ -1,95 +1,51 @@
-
-
-from aimakerspace.text_utils import TextFileLoader, CharacterTextSplitter
-from aimakerspace.vectordatabase import VectorDatabase
-import asyncio
-
-text_loader = TextFileLoader("data/KingLear.txt")
-documents = text_loader.load_documents()
-len(documents)
-
-text_splitter = CharacterTextSplitter()
-split_documents = text_splitter.split_texts(documents)

 import os
-import openai
-from getpass import getpass
-
-openai.api_key = getpass("OpenAI API Key: ")
-os.environ["OPENAI_API_KEY"] = openai.api_key
-
-vector_db = VectorDatabase()
-vector_db = asyncio.run(vector_db.abuild_from_list(split_documents))
-
 import sys
-
-
-from
-
-
-
-)
-
-
-
-
-
-
-
-    "You are an expert in {expertise}, you always answer in a kind way."
-)
-system_role_prompt = SystemRolePrompt(system_prompt_template)
-
-messages = [
-    user_role_prompt.create_message(
-        content="What is the best way to write a loop?"
-    ),
-    system_role_prompt.create_message(expertise="Python"),
-]
-
-response = chat_openai.run(messages)
-
-RAQA_PROMPT_TEMPLATE = """
-Use the provided context to answer the user's query.
-
-You may not answer the user's query unless there is specific context in the following text.
-
-If you do not know the answer, or cannot answer, please respond with "I don't know".

-
-
 """

-raqa_prompt = SystemRolePrompt(RAQA_PROMPT_TEMPLATE)

-
-
-{
-"""

-

-class RetrievalAugmentedQAPipeline:
-    def __init__(self, llm: ChatOpenAI(), vector_db_retriever: VectorDatabase) -> None:
-        self.llm = llm
-        self.vector_db_retriever = vector_db_retriever

-
-
-
-        context_prompt = ""
-        for context in context_list:
-            context_prompt += context[0] + "\n"

-

-
-
-        return self.llm.run([formatted_system_prompt, formatted_user_prompt])
-
-

-
-
-
-)
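Several of the removed lines above lost their content in this rendering, notably the prompt imports and the body of the pipeline's query method. For reference, a minimal sketch of how such a pipeline is typically completed is shown below; the method name run_pipeline, the search_by_text retriever call, the UserRolePrompt import, and the user prompt template are assumptions, not text recovered from the deleted file.

# Hypothetical completion of the removed pipeline; the names flagged below are assumptions.
RAQA_USER_PROMPT_TEMPLATE = """Context:
{context}

User Query:
{user_query}
"""
raqa_user_prompt = UserRolePrompt(RAQA_USER_PROMPT_TEMPLATE)  # UserRolePrompt assumed to come from the aimakerspace prompt utilities

class RetrievalAugmentedQAPipeline:
    def __init__(self, llm: ChatOpenAI, vector_db_retriever: VectorDatabase) -> None:
        self.llm = llm
        self.vector_db_retriever = vector_db_retriever

    def run_pipeline(self, user_query: str) -> str:
        # Retrieve the chunks most similar to the query (assumed retriever API).
        context_list = self.vector_db_retriever.search_by_text(user_query, k=4)

        # Join the retrieved chunks into a single context block, as the deleted loop did.
        context_prompt = ""
        for context in context_list:
            context_prompt += context[0] + "\n"

        # Fill the prompts and ask the chat model, matching the deleted return statement.
        formatted_system_prompt = raqa_prompt.create_message()
        formatted_user_prompt = raqa_user_prompt.create_message(
            context=context_prompt, user_query=user_query
        )
        return self.llm.run([formatted_system_prompt, formatted_user_prompt])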
+# You can find this code for Chainlit python streaming here (https://docs.chainlit.io/concepts/streaming/python)

+# OpenAI Chat completion
 import os
 import sys
+from openai import AsyncOpenAI  # importing openai for API usage
+import chainlit as cl  # importing chainlit for our app
+from chainlit.prompt import Prompt, PromptMessage  # importing prompt tools
+from chainlit.playground.providers import ChatOpenAI  # importing ChatOpenAI tools
+from dotenv import load_dotenv
+
+load_dotenv()
+sys.path.append(".")
+import raqa
+from raqa import retrieval_augmented_qa_pipeline
+
+# ChatOpenAI Templates
+system_template = """You are a helpful assistant who always speaks in a pleasant tone!
+"""

+user_template = """{input}
+Think through your response step by step.
 """


+@cl.on_chat_start  # marks a function that will be executed at the start of a user session
+async def start_chat():
+    settings = {
+        "model": "gpt-3.5-turbo",
+        "temperature": 0,
+        "max_tokens": 500,
+        "top_p": 1,
+        "frequency_penalty": 0,
+        "presence_penalty": 0,
+    }

+    cl.user_session.set("settings", settings)


+@cl.on_message  # marks a function that should be run each time the chatbot receives a message from a user
+async def main(message: cl.Message):
+    settings = cl.user_session.get("settings")

+    client = AsyncOpenAI()

+    # Do some raqa stuff
+    msg = cl.Message(content=message.content)

+
+    # Send and close the message stream
+    await msg.send()
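The added handler above stops short of generating a model response: it copies the user's text into msg and sends it straight back, with the "# Do some raqa stuff" placeholder left unfilled. As an illustration only, not part of this commit, the streaming pattern from the Chainlit docs linked at the top of the file could be wired to the two templates roughly like this; the retrieval_augmented_qa_pipeline import is omitted from the sketch because its interface is not visible in the diff.

@cl.on_message
async def main(message: cl.Message):
    settings = cl.user_session.get("settings")
    client = AsyncOpenAI()

    # Start with an empty message and stream tokens into it as they arrive.
    msg = cl.Message(content="")

    stream = await client.chat.completions.create(
        messages=[
            {"role": "system", "content": system_template},
            {"role": "user", "content": user_template.format(input=message.content)},
        ],
        stream=True,
        **settings,
    )

    async for chunk in stream:
        if chunk.choices and chunk.choices[0].delta.content:
            await msg.stream_token(chunk.choices[0].delta.content)

    # Send and close the message stream.
    await msg.send()

Retrieval would slot in before the API call, swapping the raw message.content for context-augmented prompts built by the pipeline.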
|