Hugging Face Spaces — Space status: Running
Commit fd611a8 (1 parent: cce1341) — "Update app.py"
Browse files
File: app.py (CHANGED)
|
@@ -8,11 +8,15 @@ from langchain.text_splitter import CharacterTextSplitter, RecursiveCharacterTex
|
|
| 8 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 9 |
from langchain import VectorDBQA
|
| 10 |
from langchain.llms import AzureOpenAI
|
|
|
|
|
|
|
|
|
|
|
|
|
| 11 |
|
| 12 |
os.environ["OPENAI_API_TYPE"] = openai.api_type = "azure"
|
| 13 |
-
os.environ["OPENAI_API_VERSION"] = openai.api_version = "
|
| 14 |
os.environ["OPENAI_API_BASE"] = openai.api_base = "https://openai-endpoint.openai.azure.com/"
|
| 15 |
-
openai.api_key = os.environ["OPENAI_API_KEY"]
|
| 16 |
|
| 17 |
|
| 18 |
def upload_pdf(file, pdf_text, embeddings, vectorstore, azure_embeddings, qa, progress = gr.Progress(track_tqdm=True)):
|
|
@@ -52,7 +56,8 @@ def upload_pdf(file, pdf_text, embeddings, vectorstore, azure_embeddings, qa, pr
|
|
| 52 |
documents=texts,
|
| 53 |
embeddings=embeddings,
|
| 54 |
metadatas=[{"source": "source"} for text in texts])
|
| 55 |
-
qa =
|
|
|
|
| 56 |
|
| 57 |
return pdf_text, pdf_text, embeddings, vectorstore, azure_embeddings, qa, gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
|
| 58 |
|
|
@@ -63,8 +68,9 @@ def add_text(chatstate, query, qa):
|
|
| 63 |
|
| 64 |
return chatstate, chatstate, qa
|
| 65 |
|
| 66 |
-
with gr.Blocks(css="footer {visibility: hidden}") as demo:
|
| 67 |
qa = pdf_text = embeddings = vectorstore = azure_embeddings = gr.State([])
|
|
|
|
| 68 |
with gr.Row(visible=False) as chat_row:
|
| 69 |
chatbot = gr.Chatbot()
|
| 70 |
with gr.Row(visible=False) as submit_row:
|
|
@@ -72,9 +78,6 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
|
|
| 72 |
chatstate = gr.State([])
|
| 73 |
text.submit(add_text, [chatstate, text, qa], [chatbot, chatstate, qa])
|
| 74 |
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
# set state
|
| 79 |
with gr.Column() as upload_column:
|
| 80 |
|
|
@@ -83,7 +86,8 @@ with gr.Blocks(css="footer {visibility: hidden}") as demo:
|
|
| 83 |
output_text = gr.TextArea()
|
| 84 |
upload_btn.click(upload_pdf, inputs=[file, pdf_text, embeddings, vectorstore, azure_embeddings, qa], outputs=[output_text, pdf_text, embeddings, vectorstore, azure_embeddings, qa, chat_row, submit_row, upload_column])
|
| 85 |
|
| 86 |
-
|
|
|
|
| 87 |
|
| 88 |
|
| 89 |
demo.launch(enable_queue=True)
|
|
|
|
| 8 |
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 9 |
from langchain import VectorDBQA
|
| 10 |
from langchain.llms import AzureOpenAI
|
| 11 |
+
from langchain.chains import RetrievalQA
|
| 12 |
+
from langchain.chat_models import AzureChatOpenAI
|
| 13 |
+
|
| 14 |
+
# from langchain.chat_models import AzureChatOpenAI
|
| 15 |
|
| 16 |
os.environ["OPENAI_API_TYPE"] = openai.api_type = "azure"
|
| 17 |
+
os.environ["OPENAI_API_VERSION"] = openai.api_version = "2023-03-15-preview"
|
| 18 |
os.environ["OPENAI_API_BASE"] = openai.api_base = "https://openai-endpoint.openai.azure.com/"
|
| 19 |
+
openai.api_key = os.environ["OPENAI_API_KEY"] = "<REDACTED — a live Azure OpenAI API key was committed here; it must be rotated and read from the environment (as the pre-change code did), never hard-coded in source>"
|
| 20 |
|
| 21 |
|
| 22 |
def upload_pdf(file, pdf_text, embeddings, vectorstore, azure_embeddings, qa, progress = gr.Progress(track_tqdm=True)):
|
|
|
|
| 56 |
documents=texts,
|
| 57 |
embeddings=embeddings,
|
| 58 |
metadatas=[{"source": "source"} for text in texts])
|
| 59 |
+
qa = RetrievalQA.from_chain_type(llm= AzureChatOpenAI(deployment_name="Bartos", model_name='gpt-35-turbo' ), chain_type="stuff", retriever=vectorstore.as_retriever())
|
| 60 |
+
# qa = RetrievalQA.from_chain_type(llm= AzureOpenAI(deployment_name="davinci003", model_name="text-davinci-003"), chain_type="stuff", vectorstore=vectorstore)
|
| 61 |
|
| 62 |
return pdf_text, pdf_text, embeddings, vectorstore, azure_embeddings, qa, gr.update(visible=True), gr.update(visible=True), gr.update(visible=False)
|
| 63 |
|
|
|
|
| 68 |
|
| 69 |
return chatstate, chatstate, qa
|
| 70 |
|
| 71 |
+
with gr.Blocks(css="footer {visibility: hidden}", title='PDF - Q&A') as demo:
|
| 72 |
qa = pdf_text = embeddings = vectorstore = azure_embeddings = gr.State([])
|
| 73 |
+
|
| 74 |
with gr.Row(visible=False) as chat_row:
|
| 75 |
chatbot = gr.Chatbot()
|
| 76 |
with gr.Row(visible=False) as submit_row:
|
|
|
|
| 78 |
chatstate = gr.State([])
|
| 79 |
text.submit(add_text, [chatstate, text, qa], [chatbot, chatstate, qa])
|
| 80 |
|
|
|
|
|
|
|
|
|
|
| 81 |
# set state
|
| 82 |
with gr.Column() as upload_column:
|
| 83 |
|
|
|
|
| 86 |
output_text = gr.TextArea()
|
| 87 |
upload_btn.click(upload_pdf, inputs=[file, pdf_text, embeddings, vectorstore, azure_embeddings, qa], outputs=[output_text, pdf_text, embeddings, vectorstore, azure_embeddings, qa, chat_row, submit_row, upload_column])
|
| 88 |
|
| 89 |
+
with gr.Row():
|
| 90 |
+
gr.Markdown("`now with GPT-3.5 Turbo`")
|
| 91 |
|
| 92 |
|
| 93 |
demo.launch(enable_queue=True)
|