Spaces:
Runtime error
Runtime error
update cosmos db
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- Dockerfile +0 -10
- README.md +5 -3
- STANDARD_SOFTWARE LIFECYCLES.pdf +0 -0
- __pycache__/callback.cpython-39.pyc +0 -0
- __pycache__/config.cpython-39.pyc +0 -0
- __pycache__/utils.cpython-39.pyc +0 -0
- __pycache__/vector_db.cpython-39.pyc +0 -0
- app.py +201 -109
- auth.json +14 -0
- chains/__pycache__/azure_openai.cpython-39.pyc +0 -0
- chains/__pycache__/create_topic.cpython-39.pyc +0 -0
- chains/__pycache__/custom_chain.cpython-39.pyc +0 -0
- chains/__pycache__/decision_maker.cpython-39.pyc +0 -0
- chains/__pycache__/model.cpython-39.pyc +0 -0
- chains/__pycache__/multi_queries.cpython-39.pyc +0 -0
- chains/__pycache__/openai_model.cpython-39.pyc +0 -0
- chains/__pycache__/related_question.cpython-39.pyc +0 -0
- chains/__pycache__/simple_chain.cpython-39.pyc +0 -0
- chains/__pycache__/stage_analyzer.cpython-39.pyc +0 -0
- chains/__pycache__/summary.cpython-39.pyc +0 -0
- chains/__pycache__/web_search.cpython-39.pyc +0 -0
- chains/create_topic.py +26 -0
- chains/custom_chain.py +4 -6
- chains/decision_maker.py +29 -0
- chains/openai_model.py +172 -152
- chains/qaibot_chain.py +81 -0
- chains/related_question.py +35 -0
- chains/simple_chain.py +22 -0
- chains/summary.py +27 -9
- chains/web_search.py +5 -8
- config.py +26 -12
- cosmos_db.py +73 -0
- custom.css +1026 -0
- custom_vectordb.py +421 -0
- data.json +0 -0
- geckodriver.log +0 -0
- history/binh/2023-08-06_17-10-17/Assistance Inquiry.json +1 -0
- html_parser.py +0 -116
- logo.png +0 -0
- process_fb.py +0 -55
- process_html.py +0 -58
- prompts/__pycache__/condense_llm.cpython-39.pyc +0 -0
- prompts/__pycache__/create_topic.cpython-39.pyc +0 -0
- prompts/__pycache__/custom_chain.cpython-39.pyc +0 -0
- prompts/__pycache__/decision_maker.cpython-39.pyc +0 -0
- prompts/__pycache__/llm.cpython-39.pyc +0 -0
- prompts/__pycache__/multi_queries.cpython-39.pyc +0 -0
- prompts/__pycache__/related_question.cpython-39.pyc +0 -0
- prompts/__pycache__/simple_chain.cpython-39.pyc +0 -0
- prompts/__pycache__/stage_analyzer.cpython-39.pyc +0 -0
Dockerfile
DELETED
|
@@ -1,10 +0,0 @@
|
|
| 1 |
-
FROM python:3.10
|
| 2 |
-
|
| 3 |
-
WORKDIR /usr/src/app
|
| 4 |
-
|
| 5 |
-
COPY requirements.txt ./
|
| 6 |
-
RUN pip install --no-cache-dir -r requirements.txt
|
| 7 |
-
|
| 8 |
-
COPY . .
|
| 9 |
-
|
| 10 |
-
CMD [ "python", "app.py" ]
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
README.md
CHANGED
|
@@ -1,13 +1,15 @@
|
|
| 1 |
---
|
| 2 |
-
title:
|
| 3 |
-
emoji:
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
-
sdk_version: 3.
|
| 8 |
python_version: 3.9.13
|
| 9 |
app_file: app.py
|
| 10 |
pinned: false
|
|
|
|
|
|
|
| 11 |
---
|
| 12 |
|
| 13 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
|
|
|
| 1 |
---
|
| 2 |
+
title: 🤖FPT.QAI AI Assistant
|
| 3 |
+
emoji: 🤖
|
| 4 |
colorFrom: red
|
| 5 |
colorTo: gray
|
| 6 |
sdk: gradio
|
| 7 |
+
sdk_version: 3.39.0
|
| 8 |
python_version: 3.9.13
|
| 9 |
app_file: app.py
|
| 10 |
pinned: false
|
| 11 |
+
fullWidth: true
|
| 12 |
+
|
| 13 |
---
|
| 14 |
|
| 15 |
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
STANDARD_SOFTWARE LIFECYCLES.pdf
ADDED
|
Binary file (864 kB). View file
|
|
|
__pycache__/callback.cpython-39.pyc
DELETED
|
Binary file (1.15 kB)
|
|
|
__pycache__/config.cpython-39.pyc
DELETED
|
Binary file (1.18 kB)
|
|
|
__pycache__/utils.cpython-39.pyc
DELETED
|
Binary file (4.06 kB)
|
|
|
__pycache__/vector_db.cpython-39.pyc
DELETED
|
Binary file (4.58 kB)
|
|
|
app.py
CHANGED
|
@@ -2,8 +2,10 @@ import gradio as gr
|
|
| 2 |
|
| 3 |
from utils import *
|
| 4 |
from chains.openai_model import OpenAIModel
|
| 5 |
-
from config import SEVER, PORT, DEBUG, DEPLOYMENT_ID
|
| 6 |
-
from vector_db import delete_all, delete_file, handle_upload_file,
|
|
|
|
|
|
|
| 7 |
|
| 8 |
# Get and load new model
|
| 9 |
def get_model(llm_model_name, temperature=0., top_p=1.0):
|
|
@@ -15,126 +17,197 @@ def get_model(llm_model_name, temperature=0., top_p=1.0):
|
|
| 15 |
def create_new_model():
|
| 16 |
return get_model(llm_model_name=DEPLOYMENT_ID)
|
| 17 |
|
|
|
|
| 18 |
def update_database(files_src):
|
| 19 |
message = handle_upload_file(files_src)
|
| 20 |
-
|
| 21 |
-
return gr.update(choices=
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 22 |
|
| 23 |
# Gradio app
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 26 |
user_name = gr.State("")
|
| 27 |
history = gr.State([])
|
| 28 |
current_model = gr.State(create_new_model)
|
|
|
|
| 29 |
|
| 30 |
-
with gr.Row():
|
| 31 |
-
with gr.Column(
|
| 32 |
-
gr.HTML(
|
|
|
|
|
|
|
| 33 |
status_text = ""
|
| 34 |
status_display = gr.Markdown(status_text, elem_id="status_display")
|
| 35 |
|
| 36 |
with gr.Row().style(equal_height=True):
|
| 37 |
-
with gr.Column(scale=
|
| 38 |
-
with gr.
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
# ask_examples_hidden = gr.Textbox(elem_id="hidden-message")
|
| 46 |
-
examples_questions = gr.Examples(
|
| 47 |
-
[
|
| 48 |
-
"Bagaimana cara saya memohon sewa gerai?",
|
| 49 |
-
"Bagaimana cara saya pergi dari Komtar ke Pengkalan Weld?",
|
| 50 |
-
"Bagaimana cara saya boleh kemaskini Alamat Surat Menyurat Cukai Taksiran",
|
| 51 |
-
"What is event's permit at Penang?",
|
| 52 |
-
"How to apply car parking at Penang?",
|
| 53 |
-
"Where can I request for my event’s permit in Penang?"
|
| 54 |
-
],
|
| 55 |
-
[user_input],
|
| 56 |
-
examples_per_page=6,
|
| 57 |
)
|
| 58 |
-
|
| 59 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 60 |
with gr.Row():
|
| 61 |
-
|
| 62 |
-
"🧹 New conversation", elem_id="empty_btn")
|
| 63 |
-
retryBtn = gr.Button("🔄 Retry")
|
| 64 |
-
rec = gr.Button("⏺️Record")
|
| 65 |
-
record_audio = gr.inputs.Audio(source="microphone", type="filepath")
|
| 66 |
with gr.Row():
|
| 67 |
-
gr.
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
- Get **semantic search** answers from your document using **vector databases**.
|
| 72 |
-
- Perform a **Google search** within the app
|
| 73 |
-
- **Verify sources** for all generated results.
|
| 74 |
-
- Support converting **speech to text** for easy input.
|
| 75 |
-
### Pine cone
|
| 76 |
-
Pinecone makes it easy to provide long-term memory for high-performance AI applications.
|
| 77 |
-
It's a managed, cloud-native vector database with a simple API and no infrastructure hassles. Pinecone serves fresh, filtered query results with low latency at the scale of billions of vectors.
|
| 78 |
-
https://www.pinecone.io/blog/azure/
|
| 79 |
-
### Azure OpenAI Service
|
| 80 |
-
https://learn.microsoft.com/en-us/legal/cognitive-services/openai/data-privacy
|
| 81 |
-
## 📧 Contact
|
| 82 |
-
This tool has been developed by the R&D lab at **QAI** (FPT Software, Ha Noi, Viet Nam)
|
| 83 |
-
If you have any questions or feature requests, please feel free to reach us out at <b>khangnvt1@fpt.com</b>.
|
| 84 |
-
"""
|
| 85 |
-
)
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
| 89 |
-
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
|
| 93 |
-
|
| 94 |
-
|
| 95 |
-
|
| 96 |
-
|
| 97 |
-
|
| 98 |
-
|
| 99 |
-
|
| 100 |
-
minimum=-0,
|
| 101 |
-
maximum=1.0,
|
| 102 |
-
value=0.0,
|
| 103 |
-
step=0.1,
|
| 104 |
-
interactive=True,
|
| 105 |
-
label="Temperature",
|
| 106 |
-
)
|
| 107 |
-
top_p_slider = gr.Slider(
|
| 108 |
-
minimum=-0,
|
| 109 |
-
maximum=1.0,
|
| 110 |
-
value=1.0,
|
| 111 |
-
step=0.1,
|
| 112 |
-
interactive=True,
|
| 113 |
-
label="Top_p",
|
| 114 |
-
)
|
| 115 |
-
user_identifier = gr.Textbox(
|
| 116 |
-
show_label=True,
|
| 117 |
-
placeholder="Enter here",
|
| 118 |
-
label="User name",
|
| 119 |
-
value=user_name.value,
|
| 120 |
-
lines=1,
|
| 121 |
-
)
|
| 122 |
-
loadHistoryBtn = gr.Button("💾 Load History")
|
| 123 |
-
with gr.Tab(label="Knowledge DB"):
|
| 124 |
-
all_files = gr.Dropdown(
|
| 125 |
-
label="All available files:", multiselect=True, choices=os.listdir(SAVE_DIR), interactive=True
|
| 126 |
-
)
|
| 127 |
-
with gr.Column():
|
| 128 |
-
delete_btn = gr.Button("🗑️ Delete")
|
| 129 |
-
with gr.Column():
|
| 130 |
-
delete_all_btn = gr.Button("🗑️ Delete all")
|
| 131 |
-
update_btn = gr.Button("🗑️ Update DB")
|
| 132 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 133 |
index_files.change(update_database, [index_files], [all_files, status_display])
|
|
|
|
| 134 |
delete_all_btn.click(delete_all, None, [all_files, status_display, index_files])
|
| 135 |
delete_btn.click(delete_file, [all_files], [all_files, status_display, index_files])
|
| 136 |
-
update_btn.click(
|
| 137 |
-
|
| 138 |
emptyBtn.click(
|
| 139 |
reset,
|
| 140 |
inputs=[current_model],
|
|
@@ -142,18 +215,37 @@ with gr.Blocks() as demo:
|
|
| 142 |
show_progress=True,
|
| 143 |
)
|
| 144 |
|
| 145 |
-
retryBtn.click(retry, [chatbot, current_model, use_websearch, custom_websearch], [chatbot])
|
| 146 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
|
| 148 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 149 |
|
| 150 |
-
|
| 151 |
|
| 152 |
-
user_identifier.change(
|
| 153 |
|
| 154 |
-
user_input.submit(predict, [chatbot, current_model, user_input,
|
|
|
|
|
|
|
|
|
|
| 155 |
user_input.submit(lambda: "", None, user_input)
|
| 156 |
-
submitBtn.click(predict, [chatbot, current_model, user_input,
|
|
|
|
|
|
|
|
|
|
| 157 |
submitBtn.click(lambda: "", None, user_input)
|
| 158 |
demo.queue(concurrency_count=10).launch(
|
| 159 |
-
server_name=SEVER, server_port=PORT, debug=DEBUG)
|
|
|
|
| 2 |
|
| 3 |
from utils import *
|
| 4 |
from chains.openai_model import OpenAIModel
|
| 5 |
+
from config import SEVER, PORT, DEBUG, DEPLOYMENT_ID
|
| 6 |
+
from vector_db import delete_all, delete_file, handle_upload_file, load_files_blob
|
| 7 |
+
from theme_dropdown import create_theme_dropdown
|
| 8 |
+
|
| 9 |
|
| 10 |
# Get and load new model
|
| 11 |
def get_model(llm_model_name, temperature=0., top_p=1.0):
|
|
|
|
| 17 |
def create_new_model():
|
| 18 |
return get_model(llm_model_name=DEPLOYMENT_ID)
|
| 19 |
|
| 20 |
+
|
| 21 |
def update_database(files_src):
|
| 22 |
message = handle_upload_file(files_src)
|
| 23 |
+
available_files = load_files_blob()
|
| 24 |
+
return gr.update(choices=available_files), message
|
| 25 |
+
|
| 26 |
+
|
| 27 |
+
def get_available_files():
|
| 28 |
+
available_files = load_files_blob()
|
| 29 |
+
return gr.update(choices=available_files), gr.update(visible=True)
|
| 30 |
+
|
| 31 |
+
|
| 32 |
+
def update_example(chatbot, set_save_file_name):
|
| 33 |
+
from chains.related_question import RelatedQuestion
|
| 34 |
+
from chains.create_topic import CreateTopic
|
| 35 |
+
related_question = RelatedQuestion()
|
| 36 |
+
outputs = chatbot[-1][1].split("<div")[0]
|
| 37 |
+
res = related_question.predict(inputs=chatbot[-1][0], outputs=outputs)
|
| 38 |
+
out = list(map(lambda x: x.split('- ')[-1], res.split('\n')))
|
| 39 |
+
samples = [[a] for a in out]
|
| 40 |
+
if len(chatbot) == 1:
|
| 41 |
+
topic_chain = CreateTopic()
|
| 42 |
+
topic = topic_chain.predict(inputs=chatbot[-1][0], outputs=outputs)
|
| 43 |
+
set_save_file_name = topic
|
| 44 |
+
return chatbot, gr.Dataset.update(samples=samples), samples, set_save_file_name
|
| 45 |
+
|
| 46 |
+
|
| 47 |
+
def load_example(example_id, samples):
|
| 48 |
+
return samples[example_id][0]
|
| 49 |
+
|
| 50 |
|
| 51 |
# Gradio app
|
| 52 |
+
with open("custom.css", "r", encoding="utf-8") as f:
|
| 53 |
+
customCSS = f.read()
|
| 54 |
+
dropdown, js = create_theme_dropdown()
|
| 55 |
+
|
| 56 |
+
head = """
|
| 57 |
+
<html lang="en">
|
| 58 |
+
<head>
|
| 59 |
+
<meta charset="utf-8">
|
| 60 |
+
<meta name="viewport" content="width=device-width, initial-scale=1">
|
| 61 |
+
<title>FPT Bot</title>
|
| 62 |
+
<link href="https://cdn.jsdelivr.net/npm/bootstrap@5.3.1/dist/css/bootstrap.min.css" rel="stylesheet" integrity="sha384-4bw+/aepP/YC94hEpVNVgiZdgIC5+VKNBQNGCHeKRQN+PtmoHDEXuppvnDJzQIu9" crossorigin="anonymous">
|
| 63 |
+
</head>
|
| 64 |
+
<body>
|
| 65 |
+
<script src="https://cdn.jsdelivr.net/npm/bootstrap@5.3.1/dist/js/bootstrap.bundle.min.js" integrity="sha384-HwwvtgBNo3bZJJLYd8oVXjrBZt8cqVSpeBNS5n7C8IVInixGAoxmnlMuBnhbgrkm" crossorigin="anonymous"></script>
|
| 66 |
+
</body>
|
| 67 |
+
</html>
|
| 68 |
+
"""
|
| 69 |
+
checkbox_js = """
|
| 70 |
+
async () => {
|
| 71 |
+
// Select all checkboxes with the class 'svelte-1ojmf70'
|
| 72 |
+
const checkboxes = document.querySelectorAll('.svelte-1ojmf70[type="checkbox"]');
|
| 73 |
+
|
| 74 |
+
// Add a click event listener to each checkbox
|
| 75 |
+
checkboxes.forEach(checkbox => {
|
| 76 |
+
checkbox.addEventListener('click', function() {
|
| 77 |
+
// If this checkbox was checked, uncheck all others
|
| 78 |
+
if (this.checked) {
|
| 79 |
+
checkboxes.forEach(otherCheckbox => {
|
| 80 |
+
if (otherCheckbox !== this) {
|
| 81 |
+
otherCheckbox.checked = false;
|
| 82 |
+
}
|
| 83 |
+
});
|
| 84 |
+
}
|
| 85 |
+
});
|
| 86 |
+
});
|
| 87 |
+
}
|
| 88 |
+
"""
|
| 89 |
+
|
| 90 |
+
title = """<h1 align="left" style="min-width:200px; margin-top:6px; white-space: nowrap;">AI Assistant 🤖</h1>"""
|
| 91 |
+
logo = """
|
| 92 |
+
<div class="logo"></div>
|
| 93 |
+
"""
|
| 94 |
+
user_input = gr.Textbox()
|
| 95 |
+
|
| 96 |
+
with gr.Blocks(css=customCSS, theme='minatosnow/qaigpt') as demo:
|
| 97 |
+
samples = gr.State()
|
| 98 |
user_name = gr.State("")
|
| 99 |
history = gr.State([])
|
| 100 |
current_model = gr.State(create_new_model)
|
| 101 |
+
gr.HTML(head)
|
| 102 |
|
| 103 |
+
with gr.Row(elem_classes="status-div"):
|
| 104 |
+
with gr.Column():
|
| 105 |
+
gr.HTML(logo)
|
| 106 |
+
user_info = gr.Markdown(value="getting user info...", elem_id="user_info")
|
| 107 |
+
with gr.Column():
|
| 108 |
status_text = ""
|
| 109 |
status_display = gr.Markdown(status_text, elem_id="status_display")
|
| 110 |
|
| 111 |
with gr.Row().style(equal_height=True):
|
| 112 |
+
with gr.Column(scale=1):
|
| 113 |
+
with gr.Tab(label="Database"):
|
| 114 |
+
with gr.Accordion("Upload file", open=True, visible=False) as acc:
|
| 115 |
+
with gr.Row():
|
| 116 |
+
index_files = gr.Files(label="Files", type="file")
|
| 117 |
+
|
| 118 |
+
all_files = gr.Dropdown(
|
| 119 |
+
label=None, show_label=False, multiselect=True, choices=load_files_blob(), interactive=True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 120 |
)
|
| 121 |
+
with gr.Row():
|
| 122 |
+
with gr.Column(min_width=42, scale=1):
|
| 123 |
+
delete_btn = gr.Button("", elem_classes="btn btn-del tooltip-btn tooltip-del")
|
| 124 |
+
with gr.Column(min_width=42, scale=1):
|
| 125 |
+
delete_all_btn = gr.Button("", elem_classes="btn btn-del-all tooltip-btn tooltip-del-all")
|
| 126 |
+
upload_files_btn = gr.Checkbox(label="Upload files", value=False, elem_classes="switch_checkbox")
|
| 127 |
+
local_db = gr.Checkbox(label="Local knowledge DB", value=False, elem_classes="switch_checkbox")
|
| 128 |
+
custom_websearch = gr.Checkbox(label="FPT web search", value=False, elem_classes="switch_checkbox")
|
| 129 |
+
local_db.change(None, _js=checkbox_js)
|
| 130 |
+
upload_files_btn.change(None, _js=checkbox_js)
|
| 131 |
+
custom_websearch.change(None, _js=checkbox_js)
|
| 132 |
+
|
| 133 |
+
with gr.Tab(label="History"):
|
| 134 |
+
with gr.Accordion("Save/Load conversation history"):
|
| 135 |
+
with gr.Column():
|
| 136 |
+
with gr.Row():
|
| 137 |
+
with gr.Column(scale=6):
|
| 138 |
+
history_file_dropdown = gr.Dropdown(
|
| 139 |
+
label="Load conversation from list",
|
| 140 |
+
choices=get_history_names(plain=True),
|
| 141 |
+
multiselect=False,
|
| 142 |
+
container=False,
|
| 143 |
+
)
|
| 144 |
+
with gr.Row():
|
| 145 |
+
with gr.Column(min_width=42, scale=1):
|
| 146 |
+
historyRefreshBtn = gr.Button("🔄 Refresh")
|
| 147 |
+
with gr.Column(min_width=42, scale=1):
|
| 148 |
+
historyDeleteBtn = gr.Button("🗑️ Delete")
|
| 149 |
+
with gr.Row():
|
| 150 |
+
with gr.Column(scale=6):
|
| 151 |
+
set_save_file_name = gr.Textbox(
|
| 152 |
+
show_label=True,
|
| 153 |
+
placeholder=None,
|
| 154 |
+
label="Topic (File name)",
|
| 155 |
+
)
|
| 156 |
+
with gr.Column(scale=1):
|
| 157 |
+
saveHistoryBtn = gr.Button("💾 Save History")
|
| 158 |
+
|
| 159 |
+
with gr.Tab(label="Theme"):
|
| 160 |
+
toggle_dark = gr.Button(value="Toggle Light/Dark")
|
| 161 |
+
toggle_dark.click(
|
| 162 |
+
None,
|
| 163 |
+
_js="""
|
| 164 |
+
() => {
|
| 165 |
+
document.body.classList.toggle('dark');
|
| 166 |
+
}
|
| 167 |
+
""",
|
| 168 |
+
)
|
| 169 |
+
with gr.Column(scale=9):
|
| 170 |
with gr.Row():
|
| 171 |
+
chatbot = gr.Chatbot(show_label=False, elem_classes="chatbot", show_share_button=False, height=650)
|
|
|
|
|
|
|
|
|
|
|
|
|
| 172 |
with gr.Row():
|
| 173 |
+
examples_questions = gr.Dataset(samples=[], components=[user_input], type="index")
|
| 174 |
+
with gr.Row(elem_classes="chatrow"):
|
| 175 |
+
with gr.Column(min_width=225, scale=10):
|
| 176 |
+
user_input = gr.Textbox(show_label=False, placeholder="Ask me anything...", container=False)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 177 |
|
| 178 |
+
with gr.Column(min_width=42, scale=1):
|
| 179 |
+
submitBtn = gr.Button("", elem_classes="btn btn-send tooltip-btn tooltip-content-send")
|
| 180 |
+
with gr.Column(min_width=42, scale=1):
|
| 181 |
+
record_audio = gr.Audio(source="microphone",
|
| 182 |
+
show_label=False,
|
| 183 |
+
elem_classes="audio-btn btn",
|
| 184 |
+
type="filepath")
|
| 185 |
+
with gr.Column(min_width=42, scale=1):
|
| 186 |
+
emptyBtn = gr.Button(
|
| 187 |
+
"", elem_classes="btn btn-clear tooltip-btn tooltip-content-clear")
|
| 188 |
+
|
| 189 |
+
with gr.Row(elem_classes="footer"):
|
| 190 |
+
gr.HTML("""<footer>🤖 QGPT – Developed by FPT.QAI</footer>""")
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 191 |
|
| 192 |
+
|
| 193 |
+
def create_greeting(request: gr.Request):
|
| 194 |
+
if hasattr(request, "username") and request.username: # is not None or is not ""
|
| 195 |
+
print(f"User Name: {request.username}")
|
| 196 |
+
user_info, user_name = gr.Markdown.update(value=f"Hi {request.username}!"), request.username
|
| 197 |
+
else:
|
| 198 |
+
user_info, user_name = gr.Markdown.update(value="", visible=False), ""
|
| 199 |
+
current_model = get_model(llm_model_name=DEPLOYMENT_ID)
|
| 200 |
+
current_model.set_user_identifier(user_name)
|
| 201 |
+
return user_info, user_name, current_model, get_history_names(False, user_name)
|
| 202 |
+
demo.load(create_greeting, inputs=None, outputs=[user_info, user_name, current_model, history_file_dropdown])
|
| 203 |
+
|
| 204 |
+
examples_questions.click(load_example, inputs=[examples_questions, samples], outputs=[user_input])
|
| 205 |
index_files.change(update_database, [index_files], [all_files, status_display])
|
| 206 |
+
upload_files_btn.change(get_available_files, None, [all_files, acc])
|
| 207 |
delete_all_btn.click(delete_all, None, [all_files, status_display, index_files])
|
| 208 |
delete_btn.click(delete_file, [all_files], [all_files, status_display, index_files])
|
| 209 |
+
# update_btn.click(update_fb, None, [status_display])
|
| 210 |
+
|
| 211 |
emptyBtn.click(
|
| 212 |
reset,
|
| 213 |
inputs=[current_model],
|
|
|
|
| 215 |
show_progress=True,
|
| 216 |
)
|
| 217 |
|
| 218 |
+
# retryBtn.click(retry, [chatbot, current_model, use_websearch, custom_websearch], [chatbot, status_display])
|
| 219 |
|
| 220 |
+
saveHistoryBtn.click(save_chat_history, [current_model, chatbot, set_save_file_name], [status_display])
|
| 221 |
+
historyRefreshBtn.click(get_history_names, [gr.State(False), user_name], [history_file_dropdown])
|
| 222 |
+
historyDeleteBtn.click(delete_chat_history, [current_model, history_file_dropdown], [status_display, history_file_dropdown, chatbot])
|
| 223 |
+
history_file_dropdown.change(load_chat_history, [current_model, history_file_dropdown], [set_save_file_name, chatbot])
|
| 224 |
|
| 225 |
+
record_audio.start_recording(None, None, None,
|
| 226 |
+
_js="""
|
| 227 |
+
async () => {
|
| 228 |
+
document.querySelectorAll('.sm.secondary').forEach(function(element) {
|
| 229 |
+
element.classList.remove('secondary');
|
| 230 |
+
element.classList.add('tertiary');
|
| 231 |
+
});
|
| 232 |
+
}
|
| 233 |
+
"""
|
| 234 |
+
)
|
| 235 |
|
| 236 |
+
record_audio.stop_recording(transcribe, [current_model, record_audio], [user_input, record_audio])
|
| 237 |
|
| 238 |
+
# user_identifier.change(set_user_identifier, [current_model, user_identifier], None)
|
| 239 |
|
| 240 |
+
user_input.submit(predict, [chatbot, current_model, user_input, upload_files_btn, custom_websearch, local_db],
|
| 241 |
+
[chatbot, status_display], show_progress=True).then(update_example, [chatbot, set_save_file_name],
|
| 242 |
+
[chatbot, examples_questions, samples,
|
| 243 |
+
set_save_file_name])
|
| 244 |
user_input.submit(lambda: "", None, user_input)
|
| 245 |
+
submitBtn.click(predict, [chatbot, current_model, user_input, upload_files_btn, custom_websearch, local_db],
|
| 246 |
+
[chatbot, status_display], show_progress=True).then(update_example, [chatbot, set_save_file_name],
|
| 247 |
+
[chatbot, examples_questions, samples,
|
| 248 |
+
set_save_file_name])
|
| 249 |
submitBtn.click(lambda: "", None, user_input)
|
| 250 |
demo.queue(concurrency_count=10).launch(
|
| 251 |
+
server_name=SEVER, server_port=PORT, auth=get_auth(), debug=DEBUG)
|
auth.json
ADDED
|
@@ -0,0 +1,14 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
{
|
| 2 |
+
"user1": {
|
| 3 |
+
"username": "binh",
|
| 4 |
+
"password": "123456"
|
| 5 |
+
},
|
| 6 |
+
"user2": {
|
| 7 |
+
"username": "hung",
|
| 8 |
+
"password": "123456"
|
| 9 |
+
},
|
| 10 |
+
"user3": {
|
| 11 |
+
"username": "khang",
|
| 12 |
+
"password": "123456"
|
| 13 |
+
}
|
| 14 |
+
}
|
chains/__pycache__/azure_openai.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/azure_openai.cpython-39.pyc and b/chains/__pycache__/azure_openai.cpython-39.pyc differ
|
|
|
chains/__pycache__/create_topic.cpython-39.pyc
ADDED
|
Binary file (1.25 kB). View file
|
|
|
chains/__pycache__/custom_chain.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/custom_chain.cpython-39.pyc and b/chains/__pycache__/custom_chain.cpython-39.pyc differ
|
|
|
chains/__pycache__/decision_maker.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/decision_maker.cpython-39.pyc and b/chains/__pycache__/decision_maker.cpython-39.pyc differ
|
|
|
chains/__pycache__/model.cpython-39.pyc
DELETED
|
Binary file (5.7 kB)
|
|
|
chains/__pycache__/multi_queries.cpython-39.pyc
DELETED
|
Binary file (1.43 kB)
|
|
|
chains/__pycache__/openai_model.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/openai_model.cpython-39.pyc and b/chains/__pycache__/openai_model.cpython-39.pyc differ
|
|
|
chains/__pycache__/related_question.cpython-39.pyc
ADDED
|
Binary file (1.69 kB). View file
|
|
|
chains/__pycache__/simple_chain.cpython-39.pyc
ADDED
|
Binary file (1.1 kB). View file
|
|
|
chains/__pycache__/stage_analyzer.cpython-39.pyc
DELETED
|
Binary file (1.23 kB)
|
|
|
chains/__pycache__/summary.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/summary.cpython-39.pyc and b/chains/__pycache__/summary.cpython-39.pyc differ
|
|
|
chains/__pycache__/web_search.cpython-39.pyc
CHANGED
|
Binary files a/chains/__pycache__/web_search.cpython-39.pyc and b/chains/__pycache__/web_search.cpython-39.pyc differ
|
|
|
chains/create_topic.py
ADDED
|
@@ -0,0 +1,26 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.chains.llm import LLMChain
|
| 2 |
+
from langchain.prompts.chat import (
|
| 3 |
+
ChatPromptTemplate,
|
| 4 |
+
SystemMessagePromptTemplate,
|
| 5 |
+
HumanMessagePromptTemplate)
|
| 6 |
+
from prompts.create_topic import SYSTEM_PROMPT_TEMPLATE, HUMAN_PROMPT_TEMPLATE
|
| 7 |
+
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID
|
| 8 |
+
from chains.azure_openai import CustomAzureOpenAI
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class CreateTopic(LLMChain):
|
| 12 |
+
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
| 13 |
+
openai_api_type=OPENAI_API_TYPE,
|
| 14 |
+
openai_api_base=OPENAI_API_BASE,
|
| 15 |
+
openai_api_version=OPENAI_API_VERSION,
|
| 16 |
+
openai_api_key=OPENAI_API_KEY,
|
| 17 |
+
temperature=0.0)
|
| 18 |
+
prompt = ChatPromptTemplate.from_messages(
|
| 19 |
+
[
|
| 20 |
+
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT_TEMPLATE),
|
| 21 |
+
HumanMessagePromptTemplate.from_template(HUMAN_PROMPT_TEMPLATE)
|
| 22 |
+
])
|
| 23 |
+
if __name__ == "__main__":
|
| 24 |
+
chain = CreateTopic()
|
| 25 |
+
out = chain.predict(inputs="Hello", outputs="Hello! how can I assis you today?")
|
| 26 |
+
print(out)
|
chains/custom_chain.py
CHANGED
|
@@ -11,7 +11,6 @@ from config import DEPLOYMENT_ID
|
|
| 11 |
from prompts.custom_chain import SYSTEM_PROMPT_TEMPLATE, HUMAN_PROMPT_TEMPLATE
|
| 12 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE
|
| 13 |
from chains.azure_openai import CustomAzureOpenAI
|
| 14 |
-
import os
|
| 15 |
|
| 16 |
class MultiQueriesChain(LLMChain):
|
| 17 |
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
|
@@ -41,12 +40,11 @@ class CustomConversationalRetrievalChain(ConversationalRetrievalChain):
|
|
| 41 |
docs = self.retriever.get_relevant_documents(
|
| 42 |
question
|
| 43 |
)
|
|
|
|
| 44 |
for (idx, d) in enumerate(docs):
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
item = [d.page_content.strip("�"), os.path.basename(d.metadata["source"])]
|
| 49 |
-
d.page_content = f'[{idx+1}]\t "{item[0]}"\nSource: {item[1]}'
|
| 50 |
return self._reduce_tokens_below_limit(docs)
|
| 51 |
# def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
|
| 52 |
# results = llm_chain.predict(question=question) + "\n"
|
|
|
|
| 11 |
from prompts.custom_chain import SYSTEM_PROMPT_TEMPLATE, HUMAN_PROMPT_TEMPLATE
|
| 12 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE
|
| 13 |
from chains.azure_openai import CustomAzureOpenAI
|
|
|
|
| 14 |
|
| 15 |
class MultiQueriesChain(LLMChain):
|
| 16 |
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
|
|
|
| 40 |
docs = self.retriever.get_relevant_documents(
|
| 41 |
question
|
| 42 |
)
|
| 43 |
+
# Add attribute to docs call docs.citation
|
| 44 |
for (idx, d) in enumerate(docs):
|
| 45 |
+
item = [d.page_content.strip("�"), d.metadata["source"]]
|
| 46 |
+
d.page_content = f'[{idx+1}] {item[0]}'
|
| 47 |
+
d.metadata["source"] = f'{item[1]}'
|
|
|
|
|
|
|
| 48 |
return self._reduce_tokens_below_limit(docs)
|
| 49 |
# def _get_docs(self, question: str, inputs: Dict[str, Any]) -> List[Document]:
|
| 50 |
# results = llm_chain.predict(question=question) + "\n"
|
chains/decision_maker.py
ADDED
|
@@ -0,0 +1,29 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.chains.llm import LLMChain
|
| 2 |
+
from langchain.prompts.chat import (
|
| 3 |
+
ChatPromptTemplate,
|
| 4 |
+
SystemMessagePromptTemplate,
|
| 5 |
+
HumanMessagePromptTemplate)
|
| 6 |
+
from prompts.decision_maker import SYSTEM_PROMPT_TEMPLATE, HUMAN_PROMPT_TEMPLATE
|
| 7 |
+
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID
|
| 8 |
+
from chains.azure_openai import CustomAzureOpenAI
|
| 9 |
+
|
| 10 |
+
class DecisionMaker(LLMChain):
|
| 11 |
+
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
| 12 |
+
openai_api_type=OPENAI_API_TYPE,
|
| 13 |
+
openai_api_base=OPENAI_API_BASE,
|
| 14 |
+
openai_api_version=OPENAI_API_VERSION,
|
| 15 |
+
openai_api_key=OPENAI_API_KEY,
|
| 16 |
+
temperature=0.0)
|
| 17 |
+
prompt = ChatPromptTemplate.from_messages(
|
| 18 |
+
[
|
| 19 |
+
SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT_TEMPLATE),
|
| 20 |
+
HumanMessagePromptTemplate.from_template(HUMAN_PROMPT_TEMPLATE)
|
| 21 |
+
])
|
| 22 |
+
|
| 23 |
+
|
| 24 |
+
if __name__ == "__main__":
|
| 25 |
+
rel = DecisionMaker()
|
| 26 |
+
query = "Ai là tổng thống Mỹ"
|
| 27 |
+
|
| 28 |
+
res = rel.predict(query = query)
|
| 29 |
+
print(res)
|
chains/openai_model.py
CHANGED
|
@@ -1,34 +1,36 @@
|
|
| 1 |
import json
|
| 2 |
import os
|
|
|
|
|
|
|
| 3 |
import openai
|
| 4 |
|
| 5 |
from langchain.prompts import PromptTemplate
|
| 6 |
-
from config import TIMEOUT_STREAM
|
| 7 |
from vector_db import upload_file
|
| 8 |
from callback import StreamingGradioCallbackHandler
|
| 9 |
from queue import SimpleQueue, Empty, Queue
|
| 10 |
from threading import Thread
|
| 11 |
-
from utils import
|
| 12 |
from chains.custom_chain import CustomConversationalRetrievalChain
|
| 13 |
from langchain.chains import LLMChain
|
| 14 |
from chains.azure_openai import CustomAzureOpenAI
|
| 15 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, API_KEY, \
|
| 16 |
-
|
| 17 |
-
|
| 18 |
|
| 19 |
class OpenAIModel:
|
| 20 |
def __init__(
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
| 32 |
):
|
| 33 |
self.llm_model_name = llm_model_name
|
| 34 |
self.condense_model_name = condense_model_name
|
|
@@ -43,13 +45,14 @@ class OpenAIModel:
|
|
| 43 |
self.history = []
|
| 44 |
self.user_identifier = user
|
| 45 |
|
| 46 |
-
def
|
| 47 |
-
self.user_identifier =
|
| 48 |
|
| 49 |
def format_prompt(self, qa_prompt_template, condense_prompt_template):
|
| 50 |
# Prompt template langchain
|
| 51 |
qa_prompt = PromptTemplate(template=qa_prompt_template, input_variables=["question", "chat_history", "context"])
|
| 52 |
-
condense_prompt = PromptTemplate(template=condense_prompt_template,
|
|
|
|
| 53 |
return qa_prompt, condense_prompt
|
| 54 |
|
| 55 |
def memory(self, inputs, outputs, last_k=3):
|
|
@@ -65,166 +68,85 @@ class OpenAIModel:
|
|
| 65 |
def delete_first_conversation(self):
|
| 66 |
if self.history:
|
| 67 |
self.history.pop(0)
|
| 68 |
-
|
| 69 |
def delete_last_conversation(self):
|
| 70 |
if len(self.history) > 0:
|
| 71 |
self.history.pop()
|
| 72 |
-
|
| 73 |
-
def auto_save_history(self, chatbot):
|
| 74 |
-
if self.user_identifier is not None:
|
| 75 |
-
file_path = history_file_path(self.user_identifier)
|
| 76 |
-
json_s = {"history": self.history, "chatbot": chatbot}
|
| 77 |
-
with open(file_path, "w", encoding='utf-8') as f:
|
| 78 |
-
json.dump(json_s, f, ensure_ascii=False)
|
| 79 |
-
|
| 80 |
-
def load_history(self):
|
| 81 |
-
lasted_file = load_lasted_file_username(self.user_identifier)
|
| 82 |
-
if lasted_file is not None:
|
| 83 |
-
with open(f"{lasted_file}.json", "r", encoding="utf-8") as f:
|
| 84 |
-
json_s = json.load(f)
|
| 85 |
-
self.history = json_s["history"]
|
| 86 |
-
chatbot = json_s["chatbot"]
|
| 87 |
-
return chatbot
|
| 88 |
|
| 89 |
-
def
|
| 90 |
-
|
| 91 |
-
|
| 92 |
-
api_key=API_KEY,
|
| 93 |
-
model=MODEL_ID,
|
| 94 |
-
file=media_file
|
| 95 |
-
)
|
| 96 |
-
return response["text"]
|
| 97 |
|
| 98 |
-
def
|
| 99 |
-
|
| 100 |
-
|
| 101 |
|
| 102 |
-
|
| 103 |
-
|
| 104 |
-
|
| 105 |
-
from config import GOOGLE_API_KEY, GOOGLE_CSE_ID, CUSTOM_API_KEY, CUSTOM_CSE_ID
|
| 106 |
-
from chains.summary import WebSummary
|
| 107 |
-
from chains.multi_queries import MultiQueries
|
| 108 |
-
|
| 109 |
-
status_text = "Retrieving information from the web"
|
| 110 |
-
yield chatbot, status_text
|
| 111 |
-
if use_websearch:
|
| 112 |
-
google_api_key = GOOGLE_API_KEY
|
| 113 |
-
google_cse_id = GOOGLE_CSE_ID
|
| 114 |
-
else:
|
| 115 |
-
google_api_key = CUSTOM_API_KEY
|
| 116 |
-
google_cse_id = CUSTOM_CSE_ID
|
| 117 |
-
search = GoogleSearchAPIWrapper(google_api_key=google_api_key, google_cse_id=google_cse_id)
|
| 118 |
-
|
| 119 |
-
queries_chain = MultiQueries()
|
| 120 |
-
out = queries_chain.predict(question=inputs)
|
| 121 |
-
queries = list(map(lambda x: x.split(': ')[-1], out.split('\n\n')))
|
| 122 |
-
print(queries)
|
| 123 |
-
results = []
|
| 124 |
-
for query in queries:
|
| 125 |
-
search_rs = search.results(query, 2)
|
| 126 |
-
results.extend(search_rs)
|
| 127 |
-
reference_results = []
|
| 128 |
-
display_append = []
|
| 129 |
-
for idx, result in enumerate(results[:3]):
|
| 130 |
-
try:
|
| 131 |
-
head = requests.head(result['link'])
|
| 132 |
-
print(result["link"])
|
| 133 |
-
status_text = "Access " + result['link']
|
| 134 |
-
yield chatbot, status_text
|
| 135 |
-
if "text/html" in head.headers['Content-Type']:
|
| 136 |
-
html_response = requests.get(result['link'])
|
| 137 |
-
soup = BeautifulSoup(html_response.content, "html.parser")
|
| 138 |
-
try:
|
| 139 |
-
web_summary = WebSummary()
|
| 140 |
-
text = soup.get_text()
|
| 141 |
-
lines = (line.strip() for line in text.splitlines())
|
| 142 |
-
# break multi-headlines into a line each
|
| 143 |
-
chunks = (phrase.strip() for line in lines for phrase in line.split(" "))
|
| 144 |
-
# drop blank lines
|
| 145 |
-
text = '\n'.join(chunk for chunk in chunks if chunk)
|
| 146 |
-
|
| 147 |
-
summary = web_summary.predict(question=inputs, doc=text)
|
| 148 |
-
print("Can access", result['link'])
|
| 149 |
-
# break into lines and remove leading and trailing space on each
|
| 150 |
-
|
| 151 |
-
except:
|
| 152 |
-
print("Cannot access ", result['link'])
|
| 153 |
-
yield chatbot, status_text
|
| 154 |
-
reference_results.append([summary, result['link']])
|
| 155 |
-
display_append.append(
|
| 156 |
-
f"<a href=\"{result['link']}\" target=\"_blank\">{idx+1}. {result['title']}</a>"
|
| 157 |
-
)
|
| 158 |
-
except:
|
| 159 |
-
continue
|
| 160 |
|
| 161 |
-
|
| 162 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 163 |
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
ai_response = web_search.predict(context="\n\n".join(reference_results), question=inputs, chat_history=self.history)
|
| 169 |
-
|
| 170 |
-
chatbot[-1] = (chatbot[-1][0], ai_response+display_append)
|
| 171 |
-
self.memory(inputs, ai_response)
|
| 172 |
-
self.auto_save_history(chatbot)
|
| 173 |
-
yield chatbot, status_text
|
| 174 |
-
|
| 175 |
-
else:
|
| 176 |
status_text = "Indexing files to vector database"
|
| 177 |
yield chatbot, status_text
|
|
|
|
| 178 |
|
| 179 |
-
vectorstore = upload_file()
|
| 180 |
-
|
| 181 |
-
status_text = "OpenAI version: " + OPENAI_API_VERSION
|
| 182 |
-
yield chatbot, status_text
|
| 183 |
qa_prompt, condense_prompt = self.format_prompt(**kwargs)
|
| 184 |
job_done = object() # signals the processing is done
|
| 185 |
q = SimpleQueue()
|
| 186 |
if streaming:
|
| 187 |
timeout = TIMEOUT_STREAM
|
| 188 |
-
streaming_callback =[StreamingGradioCallbackHandler(q)]
|
| 189 |
|
| 190 |
# Define llm model
|
| 191 |
-
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
| 192 |
openai_api_type=OPENAI_API_TYPE,
|
| 193 |
openai_api_base=OPENAI_API_BASE,
|
| 194 |
openai_api_version=OPENAI_API_VERSION,
|
| 195 |
openai_api_key=OPENAI_API_KEY,
|
| 196 |
temperature=self.temperature,
|
| 197 |
-
model_kwargs={"top_p": self.top_p},
|
| 198 |
-
streaming=streaming
|
| 199 |
-
callbacks=streaming_callback,
|
| 200 |
request_timeout=timeout)
|
| 201 |
-
|
| 202 |
-
condense_llm = CustomAzureOpenAI(deployment_name=self.condense_model_name,
|
| 203 |
-
|
| 204 |
-
|
| 205 |
-
|
| 206 |
-
|
| 207 |
-
|
| 208 |
|
| 209 |
status_text = "Request URL: " + OPENAI_API_BASE
|
| 210 |
yield chatbot, status_text
|
| 211 |
-
# Create a
|
| 212 |
# Create a Queue object
|
| 213 |
response_queue = SimpleQueue()
|
| 214 |
|
| 215 |
def task():
|
| 216 |
-
#
|
| 217 |
-
qa = CustomConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(
|
| 218 |
-
|
| 219 |
-
|
| 220 |
-
|
| 221 |
-
|
|
|
|
| 222 |
# query with input and chat history
|
| 223 |
response = qa({"question": inputs, "chat_history": self.history})
|
| 224 |
response_queue.put(response)
|
| 225 |
q.put(job_done)
|
| 226 |
-
|
| 227 |
-
|
| 228 |
thread = Thread(target=task)
|
| 229 |
thread.start()
|
| 230 |
chatbot.append((inputs, ""))
|
|
@@ -243,16 +165,114 @@ class OpenAIModel:
|
|
| 243 |
# add citation info to response
|
| 244 |
response = response_queue.get()
|
| 245 |
relevant_docs = response["source_documents"]
|
| 246 |
-
|
| 247 |
-
|
| 248 |
-
|
| 249 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 250 |
yield chatbot, status_text
|
| 251 |
|
| 252 |
self.memory(inputs, content)
|
| 253 |
-
self.auto_save_history(chatbot)
|
| 254 |
thread.join()
|
| 255 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 256 |
if __name__ == '__main__':
|
| 257 |
import os
|
| 258 |
from config import OPENAI_API_KEY
|
|
@@ -261,7 +281,8 @@ if __name__ == '__main__':
|
|
| 261 |
ChatPromptTemplate,
|
| 262 |
SystemMessagePromptTemplate,
|
| 263 |
HumanMessagePromptTemplate)
|
| 264 |
-
|
|
|
|
| 265 |
HUMAN_PROMPT_TEMPLATE = "Human: {question}\n AI answer:"
|
| 266 |
prompt = ChatPromptTemplate.from_messages(
|
| 267 |
[
|
|
@@ -269,13 +290,12 @@ if __name__ == '__main__':
|
|
| 269 |
HumanMessagePromptTemplate.from_template(HUMAN_PROMPT_TEMPLATE)
|
| 270 |
]
|
| 271 |
)
|
| 272 |
-
|
| 273 |
-
llm = CustomAzureOpenAI(deployment_name="binh-gpt",
|
| 274 |
openai_api_key=OPENAI_API_KEY,
|
| 275 |
openai_api_base=OPENAI_API_BASE,
|
| 276 |
openai_api_version=OPENAI_API_VERSION,
|
| 277 |
temperature=0,
|
| 278 |
-
model_kwargs={"top_p": 1.0},)
|
| 279 |
llm_chain = LLMChain(
|
| 280 |
llm=llm,
|
| 281 |
prompt=prompt
|
|
|
|
| 1 |
import json
|
| 2 |
import os
|
| 3 |
+
import re
|
| 4 |
+
|
| 5 |
import openai
|
| 6 |
|
| 7 |
from langchain.prompts import PromptTemplate
|
| 8 |
+
from config import TIMEOUT_STREAM, HISTORY_DIR
|
| 9 |
from vector_db import upload_file
|
| 10 |
from callback import StreamingGradioCallbackHandler
|
| 11 |
from queue import SimpleQueue, Empty, Queue
|
| 12 |
from threading import Thread
|
| 13 |
+
from utils import add_source_numbers, add_details, web_citation, get_history_names
|
| 14 |
from chains.custom_chain import CustomConversationalRetrievalChain
|
| 15 |
from langchain.chains import LLMChain
|
| 16 |
from chains.azure_openai import CustomAzureOpenAI
|
| 17 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, API_KEY, \
|
| 18 |
+
DEPLOYMENT_ID, MODEL_ID
|
| 19 |
+
from cosmos_db import upsert_item, read_item, delete_items, query_items
|
| 20 |
|
| 21 |
class OpenAIModel:
|
| 22 |
def __init__(
|
| 23 |
+
self,
|
| 24 |
+
llm_model_name,
|
| 25 |
+
condense_model_name,
|
| 26 |
+
prompt_template="",
|
| 27 |
+
temperature=0.0,
|
| 28 |
+
top_p=1.0,
|
| 29 |
+
n_choices=1,
|
| 30 |
+
stop=None,
|
| 31 |
+
presence_penalty=0,
|
| 32 |
+
frequency_penalty=0,
|
| 33 |
+
user=None
|
| 34 |
):
|
| 35 |
self.llm_model_name = llm_model_name
|
| 36 |
self.condense_model_name = condense_model_name
|
|
|
|
| 45 |
self.history = []
|
| 46 |
self.user_identifier = user
|
| 47 |
|
| 48 |
+
def set_user_identifier(self, new_user_identifier):
    """Record which user owns this session.

    The identifier is later used as the Cosmos DB partition key when the
    conversation is saved/loaded (see save_history/load_history).
    """
    self.user_identifier = new_user_identifier
|
| 50 |
|
| 51 |
def format_prompt(self, qa_prompt_template, condense_prompt_template):
    """Build the LangChain ``PromptTemplate`` pair used by the retrieval chain.

    Returns a tuple ``(qa_prompt, condense_prompt)`` where the QA prompt sees
    the retrieved ``context`` in addition to the question and chat history,
    while the condense prompt only rewrites the question against the history.
    """
    qa_prompt = PromptTemplate(
        template=qa_prompt_template,
        input_variables=["question", "chat_history", "context"],
    )
    condense_prompt = PromptTemplate(
        template=condense_prompt_template,
        input_variables=["question", "chat_history"],
    )
    return qa_prompt, condense_prompt
|
| 57 |
|
| 58 |
def memory(self, inputs, outputs, last_k=3):
|
|
|
|
| 68 |
def delete_first_conversation(self):
|
| 69 |
if self.history:
|
| 70 |
self.history.pop(0)
|
| 71 |
+
|
| 72 |
def delete_last_conversation(self):
|
| 73 |
if len(self.history) > 0:
|
| 74 |
self.history.pop()
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 75 |
|
| 76 |
+
def save_history(self, chatbot, file_name):
    """Persist the current conversation (history + chatbot pairs) to Cosmos DB.

    Returns the human-readable status message produced by ``upsert_item``.
    """
    return upsert_item(self.user_identifier, file_name, self.history, chatbot)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 79 |
|
| 80 |
+
def load_history(self, file_name):
    """Fetch a saved conversation from Cosmos DB.

    Returns ``(item id, chatbot message pairs)``.

    NOTE(review): this does not restore ``self.history`` from the stored
    item, so chat memory is not rebuilt here — confirm the caller handles
    that, otherwise follow-up questions lose context after a load.
    """
    item = read_item(self.user_identifier, file_name)
    return item['id'], item['chatbot']
|
| 83 |
|
| 84 |
+
def delete_history(self, file_name):
    """Remove a saved conversation from Cosmos DB.

    Returns a 3-tuple for the UI: (status message, refreshed list of saved
    history names for this user, an empty chatbot).
    """
    status = delete_items(self.user_identifier, file_name)
    refreshed_names = get_history_names(False, self.user_identifier)
    return status, refreshed_names, []
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 87 |
|
| 88 |
+
def audio_response(self, audio):
    """Transcribe an audio file with the OpenAI Whisper API.

    Parameters:
        audio: path to the recorded audio file.

    Returns:
        (transcribed text, None) — the second value presumably clears the
        audio widget on the UI side; confirm against the caller.
    """
    # BUG FIX: the file handle was opened and never closed; use a context
    # manager so it is released even if the API call raises.
    with open(audio, 'rb') as media_file:
        response = openai.Audio.transcribe(
            api_key=API_KEY,
            model=MODEL_ID,
            file=media_file
        )
    return response["text"], None
|
| 96 |
|
| 97 |
+
def inference(self, inputs, chatbot, streaming=False, upload_files_btn=False, custom_websearch=False,
|
| 98 |
+
local_db=False,
|
| 99 |
+
**kwargs):
|
| 100 |
+
if upload_files_btn or local_db:
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 101 |
status_text = "Indexing files to vector database"
|
| 102 |
yield chatbot, status_text
|
| 103 |
+
vectorstore = upload_file(upload_files_btn)
|
| 104 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 105 |
qa_prompt, condense_prompt = self.format_prompt(**kwargs)
|
| 106 |
job_done = object() # signals the processing is done
|
| 107 |
q = SimpleQueue()
|
| 108 |
if streaming:
|
| 109 |
timeout = TIMEOUT_STREAM
|
| 110 |
+
streaming_callback = [StreamingGradioCallbackHandler(q)]
|
| 111 |
|
| 112 |
# Define llm model
|
| 113 |
+
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
| 114 |
openai_api_type=OPENAI_API_TYPE,
|
| 115 |
openai_api_base=OPENAI_API_BASE,
|
| 116 |
openai_api_version=OPENAI_API_VERSION,
|
| 117 |
openai_api_key=OPENAI_API_KEY,
|
| 118 |
temperature=self.temperature,
|
| 119 |
+
model_kwargs={"top_p": self.top_p},
|
| 120 |
+
streaming=streaming, \
|
| 121 |
+
callbacks=streaming_callback,
|
| 122 |
request_timeout=timeout)
|
| 123 |
+
|
| 124 |
+
condense_llm = CustomAzureOpenAI(deployment_name=self.condense_model_name,
|
| 125 |
+
openai_api_type=OPENAI_API_TYPE,
|
| 126 |
+
openai_api_base=OPENAI_API_BASE,
|
| 127 |
+
openai_api_version=OPENAI_API_VERSION,
|
| 128 |
+
openai_api_key=OPENAI_API_KEY,
|
| 129 |
+
temperature=self.temperature)
|
| 130 |
|
| 131 |
status_text = "Request URL: " + OPENAI_API_BASE
|
| 132 |
yield chatbot, status_text
|
| 133 |
+
# Create a function to call - this will run in a thread
|
| 134 |
# Create a Queue object
|
| 135 |
response_queue = SimpleQueue()
|
| 136 |
|
| 137 |
def task():
|
| 138 |
+
# Conversation + RetrivalChain
|
| 139 |
+
qa = CustomConversationalRetrievalChain.from_llm(llm, vectorstore.as_retriever(
|
| 140 |
+
search_type="similarity_score_threshold", search_kwargs={"score_threshold": 0.75}),
|
| 141 |
+
condense_question_llm=condense_llm, verbose=True,
|
| 142 |
+
condense_question_prompt=condense_prompt,
|
| 143 |
+
combine_docs_chain_kwargs={"prompt": qa_prompt},
|
| 144 |
+
return_source_documents=True)
|
| 145 |
# query with input and chat history
|
| 146 |
response = qa({"question": inputs, "chat_history": self.history})
|
| 147 |
response_queue.put(response)
|
| 148 |
q.put(job_done)
|
| 149 |
+
|
|
|
|
| 150 |
thread = Thread(target=task)
|
| 151 |
thread.start()
|
| 152 |
chatbot.append((inputs, ""))
|
|
|
|
| 165 |
# add citation info to response
|
| 166 |
response = response_queue.get()
|
| 167 |
relevant_docs = response["source_documents"]
|
| 168 |
+
if len(relevant_docs) == 0:
|
| 169 |
+
display_append = ""
|
| 170 |
+
else:
|
| 171 |
+
if upload_files_btn:
|
| 172 |
+
reference_results = [d.page_content for d in relevant_docs]
|
| 173 |
+
reference_sources = [d.metadata["source"] for d in relevant_docs]
|
| 174 |
+
display_append = add_details(reference_results, reference_sources)
|
| 175 |
+
display_append = '<div class = "source-a">' + "\n".join(display_append) + '</div>'
|
| 176 |
+
else:
|
| 177 |
+
display_append = []
|
| 178 |
+
for idx, d in enumerate(relevant_docs):
|
| 179 |
+
link = d.metadata["source"]
|
| 180 |
+
title = d.page_content.split("\n")[0]
|
| 181 |
+
# Remove non word characters and blank space before title
|
| 182 |
+
title = re.sub(r"[^\w\s]", "", title[:4]).strip()
|
| 183 |
+
display_append.append(
|
| 184 |
+
f'<a href=\"{link}\" target=\"_blank\">[{idx + 1}] {title}</a>'
|
| 185 |
+
)
|
| 186 |
+
display_append = '<div class = "source-a">' + "\n".join(display_append) + '</div>'
|
| 187 |
+
chatbot[-1] = (chatbot[-1][0], content + display_append)
|
| 188 |
yield chatbot, status_text
|
| 189 |
|
| 190 |
self.memory(inputs, content)
|
| 191 |
+
# self.auto_save_history(chatbot)
|
| 192 |
thread.join()
|
| 193 |
|
| 194 |
+
else:
|
| 195 |
+
import requests
|
| 196 |
+
|
| 197 |
+
from langchain.utilities.google_search import GoogleSearchAPIWrapper
|
| 198 |
+
from chains.web_search import GoogleWebSearch
|
| 199 |
+
from config import GOOGLE_API_KEY, GOOGLE_CSE_ID
|
| 200 |
+
top_k = 4
|
| 201 |
+
|
| 202 |
+
if custom_websearch:
|
| 203 |
+
status_text = "Retrieving information from website FPTSoftware.com"
|
| 204 |
+
yield chatbot, status_text
|
| 205 |
+
params = {
|
| 206 |
+
"q": inputs,
|
| 207 |
+
"v": "\{539C9DC1-663A-418D-82A4-662D34EE34BC\}",
|
| 208 |
+
"p": 10,
|
| 209 |
+
"l": "en",
|
| 210 |
+
"s": "{EACE8DB5-668F-4357-9782-405070D28D11}",
|
| 211 |
+
"itemid": "\{91F4101E-B1F3-4905-A832-96F703D3FBB1\}",
|
| 212 |
+
}
|
| 213 |
+
req = requests.get(
|
| 214 |
+
"https://fptsoftware.com//sxa/search/results/?",
|
| 215 |
+
params=params
|
| 216 |
+
)
|
| 217 |
+
res = json.loads(req.text)
|
| 218 |
+
results = []
|
| 219 |
+
for r in res["Results"][:top_k]:
|
| 220 |
+
link = "https://fptsoftware.com" + r["Url"]
|
| 221 |
+
results.append({"link": link})
|
| 222 |
+
reference_results, display_append = web_citation(inputs, results, True)
|
| 223 |
+
|
| 224 |
+
reference_results = add_source_numbers(reference_results)
|
| 225 |
+
display_append = '<div class = "source-a">' + "\n".join(display_append) + '</div>'
|
| 226 |
+
status_text = "Request URL: " + OPENAI_API_BASE
|
| 227 |
+
yield chatbot, status_text
|
| 228 |
+
chatbot.append((inputs, ""))
|
| 229 |
+
web_search = GoogleWebSearch()
|
| 230 |
+
ai_response = web_search.predict(context="\n\n".join(reference_results), question=inputs,
|
| 231 |
+
chat_history=self.history)
|
| 232 |
+
|
| 233 |
+
chatbot[-1] = (chatbot[-1][0], ai_response + display_append)
|
| 234 |
+
self.memory(inputs, ai_response)
|
| 235 |
+
# self.auto_save_history(chatbot)
|
| 236 |
+
yield chatbot, status_text
|
| 237 |
+
|
| 238 |
+
|
| 239 |
+
else:
|
| 240 |
+
from chains.decision_maker import DecisionMaker
|
| 241 |
+
from chains.simple_chain import SimpleChain
|
| 242 |
+
decision_maker = DecisionMaker()
|
| 243 |
+
simple_chain = SimpleChain()
|
| 244 |
+
decision = decision_maker.predict(question=inputs)
|
| 245 |
+
if "LLM Model" in decision:
|
| 246 |
+
status_text = "Request URL: " + OPENAI_API_BASE
|
| 247 |
+
yield chatbot, status_text
|
| 248 |
+
chatbot.append((inputs, ""))
|
| 249 |
+
ai_response = simple_chain.predict(question=inputs)
|
| 250 |
+
chatbot[-1] = (chatbot[-1][0], ai_response)
|
| 251 |
+
self.memory(inputs, ai_response)
|
| 252 |
+
# self.auto_save_history(chatbot)
|
| 253 |
+
yield chatbot, status_text
|
| 254 |
+
else:
|
| 255 |
+
status_text = "Retrieving information from Google"
|
| 256 |
+
yield chatbot, status_text
|
| 257 |
+
search = GoogleSearchAPIWrapper(google_api_key=GOOGLE_API_KEY, google_cse_id=GOOGLE_CSE_ID)
|
| 258 |
+
results = search.results(inputs, num_results=top_k)
|
| 259 |
+
reference_results, display_append = web_citation(inputs, results, False)
|
| 260 |
+
|
| 261 |
+
reference_results = add_source_numbers(reference_results)
|
| 262 |
+
display_append = '<div class = "source-a">' + "\n".join(display_append) + '</div>'
|
| 263 |
+
status_text = "Request URL: " + OPENAI_API_BASE
|
| 264 |
+
yield chatbot, status_text
|
| 265 |
+
chatbot.append((inputs, ""))
|
| 266 |
+
web_search = GoogleWebSearch()
|
| 267 |
+
ai_response = web_search.predict(context="\n\n".join(reference_results), question=inputs,
|
| 268 |
+
chat_history=self.history)
|
| 269 |
+
|
| 270 |
+
chatbot[-1] = (chatbot[-1][0], ai_response + display_append)
|
| 271 |
+
self.memory(inputs, ai_response)
|
| 272 |
+
# self.auto_save_history(chatbot)
|
| 273 |
+
yield chatbot, status_text
|
| 274 |
+
|
| 275 |
+
|
| 276 |
if __name__ == '__main__':
|
| 277 |
import os
|
| 278 |
from config import OPENAI_API_KEY
|
|
|
|
| 281 |
ChatPromptTemplate,
|
| 282 |
SystemMessagePromptTemplate,
|
| 283 |
HumanMessagePromptTemplate)
|
| 284 |
+
|
| 285 |
+
SYSTEM_PROMPT_TEMPLATE = "You're a helpful assistant."
|
| 286 |
HUMAN_PROMPT_TEMPLATE = "Human: {question}\n AI answer:"
|
| 287 |
prompt = ChatPromptTemplate.from_messages(
|
| 288 |
[
|
|
|
|
| 290 |
HumanMessagePromptTemplate.from_template(HUMAN_PROMPT_TEMPLATE)
|
| 291 |
]
|
| 292 |
)
|
| 293 |
+
llm = CustomAzureOpenAI(deployment_name="binh-gpt",
|
|
|
|
| 294 |
openai_api_key=OPENAI_API_KEY,
|
| 295 |
openai_api_base=OPENAI_API_BASE,
|
| 296 |
openai_api_version=OPENAI_API_VERSION,
|
| 297 |
temperature=0,
|
| 298 |
+
model_kwargs={"top_p": 1.0}, )
|
| 299 |
llm_chain = LLMChain(
|
| 300 |
llm=llm,
|
| 301 |
prompt=prompt
|
chains/qaibot_chain.py
ADDED
|
@@ -0,0 +1,81 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import requests
|
| 2 |
+
import json
|
| 3 |
+
|
| 4 |
+
from chains.azure_openai import CustomAzureOpenAI
|
| 5 |
+
from chains.decision_maker import DecisionMaker
|
| 6 |
+
from chains.simple_chain import SimpleChain
|
| 7 |
+
from bs4 import BeautifulSoup
|
| 8 |
+
from chains.summary import WebSummary
|
| 9 |
+
from langchain.utilities.google_search import GoogleSearchAPIWrapper
|
| 10 |
+
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID, GOOGLE_API_KEY, GOOGLE_CSE_ID
|
| 11 |
+
|
| 12 |
+
class QAIBotChain:
    """Route a question to a plain LLM answer or a web search + summarize step.

    ``run`` returns either ``(answer, False)`` when the decision chain picks
    the plain LLM, or ``(reference_results, display_append)`` where
    ``reference_results`` is ``[[summary, link], ...]`` and ``display_append``
    holds HTML anchor strings for the UI.
    """

    def __init__(self):
        self.llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
                                     openai_api_type=OPENAI_API_TYPE,
                                     openai_api_base=OPENAI_API_BASE,
                                     openai_api_version=OPENAI_API_VERSION,
                                     openai_api_key=OPENAI_API_KEY,
                                     temperature=0.0)
        self.decision = DecisionMaker()
        self.simple_chain = SimpleChain()
        self.summary = WebSummary()

    def _search_fpt(self, question, num_results):
        """Query the FPTSoftware.com Sitecore search endpoint; return result links."""
        params = {
            "q": question,
            "v": "\{539C9DC1-663A-418D-82A4-662D34EE34BC\}",
            "p": 10,
            "l": "en",
            "s": "{EACE8DB5-668F-4357-9782-405070D28D11}",
            "itemid": "\{91F4101E-B1F3-4905-A832-96F703D3FBB1\}",
        }
        req = requests.get(
            "https://fptsoftware.com//sxa/search/results/?",
            params=params
        )
        res = json.loads(req.text)
        results = []
        for r in res["Results"][:num_results]:
            results.append({"link": "https://fptsoftware.com" + r["Url"]})
        return results

    def run(self, question, custom_web_search=False, num_results=4):
        """Answer *question*, optionally restricting search to FPTSoftware.com."""
        if custom_web_search:
            results = self._search_fpt(question, num_results)
        else:
            decision = self.decision.predict(question=question)
            if "LLM Model" in decision:
                # Decision chain says no retrieval is needed: plain answer.
                return self.simple_chain.predict(question=question), False
            search = GoogleSearchAPIWrapper(google_api_key=GOOGLE_API_KEY,
                                            google_cse_id=GOOGLE_CSE_ID)
            results = search.results(question, num_results=num_results)

        reference_results = []
        display_append = []
        for idx, result in enumerate(results):
            try:
                head = requests.head(result['link'])
                if "text/html" not in head.headers['Content-Type']:
                    continue
                html_response = requests.get(result['link'])
                soup = BeautifulSoup(html_response.content, "html.parser")
                # BUG FIX: FPT search results carry no "title" key, which made
                # the old ``result["title"]`` raise KeyError and the bare
                # ``except`` silently drop every custom-search result.  Fall
                # back to the page's own <title>.
                title = result.get("title") or soup.find_all('title')[0].get_text()
                # Flatten the page text: strip each line, break multi-headline
                # lines apart, drop blanks.
                text = soup.get_text()
                lines = (line.strip() for line in text.splitlines())
                chunks = (phrase.strip() for line in lines for phrase in line.split("  "))
                text = '\n'.join(chunk for chunk in chunks if chunk)
                # BUG FIX: was ``self.web_summary`` (attribute never defined;
                # __init__ sets ``self.summary``), and on a failed summary the
                # old code appended an unbound ``summary`` variable.
                summary = self.summary.predict(question=question, doc=text)
                print("Can access", result['link'])
                reference_results.append([summary, result['link']])
                display_append.append(
                    f'<a href="{result["link"]}" target="_blank">{idx + 1}. {title}</a>'
                )
            except Exception:
                # Best-effort crawl: skip pages we cannot fetch or summarize.
                print("Cannot access ", result['link'])
                continue
        return reference_results, display_append
|
chains/related_question.py
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.chains import LLMChain
|
| 2 |
+
from langchain.prompts.chat import (
|
| 3 |
+
ChatPromptTemplate,
|
| 4 |
+
SystemMessagePromptTemplate,
|
| 5 |
+
HumanMessagePromptTemplate)
|
| 6 |
+
from chains.azure_openai import CustomAzureOpenAI
|
| 7 |
+
from prompts.related_question import system_template, human_template
|
| 8 |
+
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID, API_KEY
|
| 9 |
+
from langchain.chat_models import ChatOpenAI
|
| 10 |
+
|
| 11 |
+
class RelatedQuestion(LLMChain):
    """LLMChain that proposes follow-up questions for the last chat exchange."""

    llm = CustomAzureOpenAI(
        deployment_name=DEPLOYMENT_ID,
        openai_api_type=OPENAI_API_TYPE,
        openai_api_base=OPENAI_API_BASE,
        openai_api_version=OPENAI_API_VERSION,
        openai_api_key=OPENAI_API_KEY,
        temperature=0.0,
    )
    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template(human_template),
        ]
    )
|
| 23 |
+
|
| 24 |
+
if __name__ == "__main__":
    # Smoke test: ask the chain for follow-up questions to a trivial exchange.
    # (Removed dead code: an unused ``import re`` and an unused
    # ``pattern = "\\d. {*.?}"`` literal whose ``\d`` is an invalid escape
    # sequence and triggers a SyntaxWarning on modern Python.)
    rel = RelatedQuestion()
    inputs = "Hello bot"
    outputs = "Hello! How can I assist you today?"

    res = rel.predict(inputs=inputs, outputs=outputs)
    print(res)
    # Strip the leading "1. " style numbering and wrap each suggestion in its
    # own list — presumably the nested-list shape the UI dataset expects;
    # confirm against the caller.
    results = [[line.split('. ')[-1]] for line in res.split('\n')]
    print(results)
|
chains/simple_chain.py
ADDED
|
@@ -0,0 +1,22 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
from langchain.chains.llm import LLMChain
|
| 2 |
+
from langchain.prompts.chat import (
|
| 3 |
+
ChatPromptTemplate,
|
| 4 |
+
SystemMessagePromptTemplate,
|
| 5 |
+
HumanMessagePromptTemplate)
|
| 6 |
+
from prompts.simple_chain import SYSTEM_PROMPT_TEMPLATE, HUMAN_PROMPT_TEMPLATE
|
| 7 |
+
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID
|
| 8 |
+
from chains.azure_openai import CustomAzureOpenAI
|
| 9 |
+
|
| 10 |
+
|
| 11 |
+
class SimpleChain(LLMChain):
    """Plain chat chain: system + human prompt against the Azure GPT
    deployment, with no retrieval or web-search context attached."""

    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessagePromptTemplate.from_template(SYSTEM_PROMPT_TEMPLATE),
            HumanMessagePromptTemplate.from_template(HUMAN_PROMPT_TEMPLATE),
        ]
    )
    llm = CustomAzureOpenAI(
        deployment_name=DEPLOYMENT_ID,
        openai_api_type=OPENAI_API_TYPE,
        openai_api_base=OPENAI_API_BASE,
        openai_api_version=OPENAI_API_VERSION,
        openai_api_key=OPENAI_API_KEY,
        temperature=0.0,
    )
|
chains/summary.py
CHANGED
|
@@ -7,6 +7,8 @@ from chains.azure_openai import CustomAzureOpenAI
|
|
| 7 |
from prompts.summary import system_template, human_template
|
| 8 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID, API_KEY
|
| 9 |
from langchain.chat_models import ChatOpenAI
|
|
|
|
|
|
|
| 10 |
|
| 11 |
class WebSummary(LLMChain):
|
| 12 |
prompt = ChatPromptTemplate.from_messages(
|
|
@@ -14,12 +16,28 @@ class WebSummary(LLMChain):
|
|
| 14 |
system_template),
|
| 15 |
HumanMessagePromptTemplate.from_template(human_template)
|
| 16 |
])
|
| 17 |
-
|
| 18 |
-
|
| 19 |
-
|
| 20 |
-
|
| 21 |
-
|
| 22 |
-
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 7 |
from prompts.summary import system_template, human_template
|
| 8 |
from config import OPENAI_API_TYPE, OPENAI_API_VERSION, OPENAI_API_KEY, OPENAI_API_BASE, DEPLOYMENT_ID, API_KEY
|
| 9 |
from langchain.chat_models import ChatOpenAI
|
| 10 |
+
import json
|
| 11 |
+
import requests
|
| 12 |
|
| 13 |
class WebSummary(LLMChain):
    """Summarize a web page with respect to a user question; ``run`` lists
    candidate pages from the FPTSoftware.com site search."""

    llm = CustomAzureOpenAI(
        deployment_name=DEPLOYMENT_ID,
        openai_api_type=OPENAI_API_TYPE,
        openai_api_base=OPENAI_API_BASE,
        openai_api_version=OPENAI_API_VERSION,
        openai_api_key=OPENAI_API_KEY,
        temperature=0.0,
    )
    prompt = ChatPromptTemplate.from_messages(
        [
            SystemMessagePromptTemplate.from_template(system_template),
            HumanMessagePromptTemplate.from_template(human_template),
        ]
    )

    def run(self, question, num_result=4):
        """Query the FPTSoftware.com Sitecore search endpoint and return up to
        *num_result* result links as ``[{"link": url}, ...]``.

        NOTE(review): the "v" and "itemid" GUID parameters contain literal
        backslashes while "s" does not — this inconsistency is preserved as-is;
        confirm against the search API which form it actually expects.
        """
        query_params = {
            "q": question,
            "v": "\{539C9DC1-663A-418D-82A4-662D34EE34BC\}",
            "p": 10,
            "l": "en",
            "s": "{EACE8DB5-668F-4357-9782-405070D28D11}",
            "itemid": "\{91F4101E-B1F3-4905-A832-96F703D3FBB1\}",
        }
        resp = requests.get(
            "https://fptsoftware.com//sxa/search/results/?",
            params=query_params
        )
        payload = json.loads(resp.text)
        links = []
        for hit in payload["Results"][:num_result]:
            links.append({"link": "https://fptsoftware.com" + hit["Url"]})
        return links
|
chains/web_search.py
CHANGED
|
@@ -9,14 +9,11 @@ from chains.azure_openai import CustomAzureOpenAI
|
|
| 9 |
from langchain.chat_models import ChatOpenAI
|
| 10 |
|
| 11 |
class GoogleWebSearch(LLMChain):
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
# temperature=0.0)
|
| 18 |
-
llm = ChatOpenAI(model_name="gpt-4",
|
| 19 |
-
openai_api_key=API_KEY,
|
| 20 |
temperature=0.0)
|
| 21 |
prompt = ChatPromptTemplate.from_messages(
|
| 22 |
[
|
|
|
|
| 9 |
from langchain.chat_models import ChatOpenAI
|
| 10 |
|
| 11 |
class GoogleWebSearch(LLMChain):
|
| 12 |
+
llm = CustomAzureOpenAI(deployment_name=DEPLOYMENT_ID,
|
| 13 |
+
openai_api_type=OPENAI_API_TYPE,
|
| 14 |
+
openai_api_base=OPENAI_API_BASE,
|
| 15 |
+
openai_api_version=OPENAI_API_VERSION,
|
| 16 |
+
openai_api_key=OPENAI_API_KEY,
|
|
|
|
|
|
|
|
|
|
| 17 |
temperature=0.0)
|
| 18 |
prompt = ChatPromptTemplate.from_messages(
|
| 19 |
[
|
config.py
CHANGED
|
@@ -2,45 +2,59 @@ import os
|
|
| 2 |
|
| 3 |
# Folder
|
| 4 |
HISTORY_DIR = "history"
|
| 5 |
-
SAVE_DIR = "documents"
|
| 6 |
|
| 7 |
if not os.path.exists(HISTORY_DIR):
|
| 8 |
os.makedirs(HISTORY_DIR)
|
| 9 |
-
|
| 10 |
-
if not os.path.exists(SAVE_DIR):
|
| 11 |
-
os.makedirs(SAVE_DIR)
|
| 12 |
-
|
| 13 |
# Whisper API
|
| 14 |
-
API_KEY = "sk-
|
| 15 |
MODEL_ID = "whisper-1"
|
| 16 |
|
| 17 |
# Azure endpoint
|
| 18 |
OPENAI_API_TYPE = "azure"
|
| 19 |
OPENAI_API_VERSION = "2023-05-15"
|
| 20 |
# Embedding openai
|
| 21 |
-
EMBEDDING_API_KEY = "
|
| 22 |
EMBEDDING_API_BASE = "https://qaigpt2.openai.azure.com/"
|
| 23 |
EMBEDDING_DEPLOYMENT_ID = "embed"
|
| 24 |
# ChatGPT
|
| 25 |
-
|
| 26 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 27 |
DEPLOYMENT_ID = "gpt"
|
| 28 |
|
| 29 |
# Pinecone vector DB
|
| 30 |
PINECONE_API_KEY = "82b9902a-2908-4ece-88bf-483c413a91d7"
|
| 31 |
PINECONE_ENVIRONMENT = "us-west1-gcp-free"
|
| 32 |
INDEX_NAME = "text-indexing"
|
|
|
|
|
|
|
| 33 |
|
| 34 |
# Google search API
|
| 35 |
-
GOOGLE_API_KEY
|
| 36 |
-
GOOGLE_CSE_ID
|
| 37 |
|
| 38 |
# Custom google search API
|
| 39 |
CUSTOM_API_KEY = "AIzaSyDycFFOFtPg123bm9N3BRCy_q5gyEk7fzs"
|
| 40 |
-
CUSTOM_CSE_ID = "
|
| 41 |
|
| 42 |
# Local host
|
| 43 |
TIMEOUT_STREAM = 60
|
| 44 |
SEVER = "0.0.0.0"
|
| 45 |
PORT = 7860
|
| 46 |
DEBUG = True
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 2 |
|
| 3 |
# Folder
HISTORY_DIR = "history"

if not os.path.exists(HISTORY_DIR):
    os.makedirs(HISTORY_DIR)

# ---------------------------------------------------------------------------
# SECURITY NOTE (review): every credential below used to be a hard-coded
# literal committed to version control, so all of them must be treated as
# leaked and rotated.  They are now read from environment variables, with the
# historical literals kept only as fallbacks so existing deployments keep
# working unchanged; delete the fallbacks once the secrets are rotated.
# ---------------------------------------------------------------------------

# Whisper API
API_KEY = os.environ.get("WHISPER_API_KEY", "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")
MODEL_ID = "whisper-1"

# Azure endpoint
OPENAI_API_TYPE = "azure"
OPENAI_API_VERSION = "2023-05-15"

# Embedding openai
EMBEDDING_API_KEY = os.environ.get("EMBEDDING_API_KEY", "776f46ee3ba445ebb2ecaeb988bfd04a")
EMBEDDING_API_BASE = "https://qaigpt2.openai.azure.com/"
EMBEDDING_DEPLOYMENT_ID = "embed"

# ChatGPT
# EU Region (kept commented for switching regions)
# OPENAI_API_KEY = "86f152e6a6ff46b1a71d5324a2823478"
# OPENAI_API_BASE = "https://qaigpteus2.openai.azure.com/"
# DEPLOYMENT_ID = "gpt"

# France Region
OPENAI_API_KEY = os.environ.get("AZURE_OPENAI_API_KEY", "94e749c64288478d8861fcaf3c1b415f")
OPENAI_API_BASE = os.environ.get("AZURE_OPENAI_API_BASE", "https://qaigptfr.openai.azure.com/")
DEPLOYMENT_ID = "gpt"

# Pinecone vector DB
PINECONE_API_KEY = os.environ.get("PINECONE_API_KEY", "82b9902a-2908-4ece-88bf-483c413a91d7")
PINECONE_ENVIRONMENT = "us-west1-gcp-free"
INDEX_NAME = "text-indexing"
NAME_SPACE_1 = "documents"
NAME_SPACE_2 = "fanpage"

# Google search API
GOOGLE_API_KEY = os.environ.get("GOOGLE_SEARCH_API_KEY", "AIzaSyBcwB4YIqjDcYr5XnPt5IrktqbH4Mb_1hE")
GOOGLE_CSE_ID = os.environ.get("GOOGLE_CSE_ID", "e61c62a86e2b848fd")

# Custom google search API (restricted engine used for site-scoped search)
CUSTOM_API_KEY = os.environ.get("CUSTOM_SEARCH_API_KEY", "AIzaSyDycFFOFtPg123bm9N3BRCy_q5gyEk7fzs")
CUSTOM_CSE_ID = os.environ.get("CUSTOM_CSE_ID", "a1bdbedc30f2b4790")

# Local host
TIMEOUT_STREAM = 60  # seconds before a streaming completion request times out
SEVER = "0.0.0.0"  # (sic) misspelling of SERVER kept: callers import SEVER
PORT = 7860
DEBUG = True

# Azure blob storage
CONNECTION_STRING = os.environ.get(
    "AZURE_STORAGE_CONNECTION_STRING",
    "DefaultEndpointsProtocol=https;AccountName=qaigpt;AccountKey=osgfH8+I/azlhNNn5Ps3jpYLgCfnXKuOZPQ4fkpwVX/tNISdyf8jhfq37lKxJSDIORgDPA7wPg5v+AStb47TRg==;EndpointSuffix=core.windows.net",
)
CONTAINER_NAME = "fptdocuments"

# Azure cosmos db
CREDENTIAL = os.environ.get("COSMOS_CREDENTIAL", "6t1RmAaF6onypDHHtAVrcnNOwPZpPusTiq9N5tHl8HpkDEPZn5y0TJbdCNBga1JuKBJaKlqnc09JACDblCPpbQ==")
ENDPOINT = 'https://qaigpt.documents.azure.com:443/'
DATABASE = "chat_gpt"
CONTAINER_COSMOS = "history"
|
cosmos_db.py
ADDED
|
@@ -0,0 +1,73 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import json
from datetime import date, datetime, timezone

from azure.cosmos import CosmosClient, PartitionKey

from config import ENDPOINT, CREDENTIAL, DATABASE, CONTAINER_COSMOS
|
| 5 |
+
|
| 6 |
+
# Shared Cosmos DB handles, created once at import time and reused by every
# helper in this module.
_client = CosmosClient(ENDPOINT, credential=CREDENTIAL)

# Both calls are idempotent: they create the resource on first run and simply
# return the existing one afterwards.
database = _client.create_database_if_not_exists(DATABASE)
_container = database.create_container_if_not_exists(
    CONTAINER_COSMOS,
    partition_key=PartitionKey(path="/user_id"),  # documents are partitioned per user
)
|
| 16 |
+
|
| 17 |
+
def json_serial(obj):
    """Serialize datetime/date values to ISO-8601 strings.

    Intended for use as the ``default=`` hook of :func:`json.dumps`; any
    other type raises ``TypeError`` just like the stdlib encoder would.
    """
    if not isinstance(obj, (datetime, date)):
        raise TypeError("Type %s not serializable" % type(obj))
    return obj.isoformat()
|
| 23 |
+
|
| 24 |
+
def upsert_item(user_id, file_name, history, chatbot):
    """Create or replace the conversation document for (user_id, file_name).

    Parameters:
        user_id: partition-key value identifying the owner.
        file_name: document id (one document per saved conversation).
        history: serialized conversation history payload.
        chatbot: serialized chatbot display payload.

    Returns a human-readable status message.

    Bug fix: the timestamp was previously stored via
    ``json.dumps(datetime.utcnow(), default=json_serial)``, which embedded
    literal quote characters in the stored value ('"2023-…"').  It is now
    stored as a plain, timezone-aware ISO-8601 string.
    """
    _container.upsert_item(
        body={
            "id": file_name,
            "user_id": user_id,  # partition key (see container definition)
            "date": datetime.now(timezone.utc).isoformat(),
            "history": history,
            "chatbot": chatbot,
        }
    )
    return f'Upsert {file_name} successfully'
|
| 37 |
+
|
| 38 |
+
def read_item(user_id, file_name):
    """Point-read one conversation document by id within the user's partition.

    Raises ``CosmosResourceNotFoundError`` if the document does not exist.
    """
    return _container.read_item(item=file_name, partition_key=user_id)
|
| 41 |
+
|
| 42 |
+
|
| 43 |
+
def query_items(user_id, file_name):
    """Return all documents owned by *user_id* whose id equals *file_name*.

    Fix: the query filters on the partition key (``user_id``) yet previously
    forced ``enable_cross_partition_query=True``, fanning the query out to
    every partition.  Scoping it with ``partition_key=user_id`` returns the
    same rows at a fraction of the RU cost.
    """
    return list(_container.query_items(
        query="SELECT * FROM r WHERE r.user_id=@user_id AND r.id=@id",
        parameters=[
            {"name": "@user_id", "value": user_id},
            {"name": "@id", "value": file_name},
        ],
        partition_key=user_id,
    ))
|
| 53 |
+
|
| 54 |
+
def query_item(user_id):
    """Return every conversation document owned by *user_id*.

    NOTE(review): the name is confusingly close to :func:`query_items`; it is
    kept because callers (e.g. the ``__main__`` smoke test and app code)
    import it under this name.

    Fix: the query filters on the partition key (``user_id``) yet previously
    forced ``enable_cross_partition_query=True``; scoping it with
    ``partition_key=user_id`` returns the same rows without a cross-partition
    fan-out.
    """
    return list(_container.query_items(
        query="SELECT * FROM r WHERE r.user_id=@user_id",
        parameters=[
            {"name": "@user_id", "value": user_id},
        ],
        partition_key=user_id,
    ))
|
| 63 |
+
|
| 64 |
+
def delete_items(user_id, file_name):
    """Delete the conversation document *file_name* owned by *user_id*.

    Returns a human-readable status message.  Raises
    ``CosmosResourceNotFoundError`` if the document does not exist.

    Fix: the returned message previously misspelled "succesfully"; the unused
    ``response`` local is also dropped.
    """
    _container.delete_item(item=file_name, partition_key=user_id)
    return f'Delete {file_name} successfully'
|
| 68 |
+
|
| 69 |
+
if __name__ == '__main__':
    # Manual smoke test: list the document ids stored for a sample user.
    saved = query_item("khanh")
    print([doc["id"] for doc in saved])
|
| 73 |
+
|
custom.css
ADDED
|
@@ -0,0 +1,1026 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
:root {
|
| 2 |
+
--chatbot-color-light: #000000;
|
| 3 |
+
--chatbot-color-dark: #FFFFFF;
|
| 4 |
+
--chatbot-background-color-light: #F3F3F3;
|
| 5 |
+
--chatbot-background-color-dark: #121111;
|
| 6 |
+
--message-user-background-color-light: #2685b5;
|
| 7 |
+
--message-user-background-color-dark: #2685b5;
|
| 8 |
+
--message-bot-background-color-light: #F3F3F3;
|
| 9 |
+
--message-bot-background-color-dark: #2C2C2C;
|
| 10 |
+
--switch-checkbox-color-light: #e9e9ec;
|
| 11 |
+
--switch-checkbox-color-dark: #515151;
|
| 12 |
+
--switch-checkbox-marked-color: #2685b5;
|
| 13 |
+
--cib-shadow-card: 0px 0.3px 0.9px rgba(0, 0, 0, 0.12), 0px 1.6px 3.6px rgba(0, 0, 0, 0.16);
|
| 14 |
+
--message-font-size: 15px;
|
| 15 |
+
--background-gradient: linear-gradient(90deg, rgb(239, 242, 247) 0%, 7.60286%, rgb(237, 240, 249) 15.2057%, 20.7513%, rgb(235, 239, 248) 26.297%, 27.6386%, rgb(235, 239, 248) 28.9803%, 38.2826%, rgb(231, 237, 249) 47.585%, 48.1216%, rgb(230, 236, 250) 48.6583%, 53.1306%, rgb(228, 236, 249) 57.6029%, 61.5385%, rgb(227, 234, 250) 65.4741%, 68.7835%, rgb(222, 234, 250) 72.093%, 75.7603%, rgb(219, 230, 248) 79.4275%, 82.8265%, rgb(216, 229, 248) 86.2254%, 87.8354%, rgb(213, 228, 249) 89.4454%, 91.8605%, rgb(210, 226, 249) 94.2755%, 95.4383%, rgb(209, 225, 248) 96.6011%, 98.3005%, rgb(208, 224, 247) 100%);
|
| 16 |
+
--background-gradient-dark: #0b0f19;
|
| 17 |
+
}
|
| 18 |
+
|
| 19 |
+
gradio-app {
|
| 20 |
+
background: var(--background-gradient) !important;
|
| 21 |
+
}
|
| 22 |
+
|
| 23 |
+
.dark gradio-app {
|
| 24 |
+
background: var(--background-gradient-dark) !important;
|
| 25 |
+
}
|
| 26 |
+
|
| 27 |
+
#app_title {
|
| 28 |
+
font-weight: var(--prose-header-text-weight);
|
| 29 |
+
font-size: var(--text-xxl);
|
| 30 |
+
line-height: 1.3;
|
| 31 |
+
text-align: left;
|
| 32 |
+
margin-top: 6px;
|
| 33 |
+
white-space: nowrap;
|
| 34 |
+
}
|
| 35 |
+
|
| 36 |
+
#description {
|
| 37 |
+
text-align: center;
|
| 38 |
+
margin: 32px 0 4px 0;
|
| 39 |
+
}
|
| 40 |
+
|
| 41 |
+
|
| 42 |
+
div.form {
|
| 43 |
+
background: none !important;
|
| 44 |
+
border: none !important;
|
| 45 |
+
}
|
| 46 |
+
|
| 47 |
+
|
| 48 |
+
#advanced_warning {
|
| 49 |
+
display: flex;
|
| 50 |
+
flex-wrap: wrap;
|
| 51 |
+
flex-direction: column;
|
| 52 |
+
align-content: center;
|
| 53 |
+
}
|
| 54 |
+
|
| 55 |
+
/* gradio的页脚信息 */
|
| 56 |
+
footer {
|
| 57 |
+
margin-top: 15px !important;
|
| 58 |
+
font-size: 85%;
|
| 59 |
+
display: inline-block;
|
| 60 |
+
text-align: center;
|
| 61 |
+
opacity: 0.60;
|
| 62 |
+
position: absolute;
|
| 63 |
+
max-height: 30px;
|
| 64 |
+
width: 100% !important;
|
| 65 |
+
}
|
| 66 |
+
|
| 67 |
+
footer[class^="svelte-"] {
|
| 68 |
+
display: none !important;
|
| 69 |
+
}
|
| 70 |
+
|
| 71 |
+
#footer {
|
| 72 |
+
text-align: center;
|
| 73 |
+
}
|
| 74 |
+
|
| 75 |
+
#footer div {
|
| 76 |
+
display: inline-block;
|
| 77 |
+
}
|
| 78 |
+
|
| 79 |
+
#footer .versions {
|
| 80 |
+
font-size: 85%;
|
| 81 |
+
opacity: 0.60;
|
| 82 |
+
}
|
| 83 |
+
|
| 84 |
+
#float_display {
|
| 85 |
+
position: absolute;
|
| 86 |
+
max-height: 30px;
|
| 87 |
+
}
|
| 88 |
+
|
| 89 |
+
#toast-update {
|
| 90 |
+
position: absolute;
|
| 91 |
+
display: flex;
|
| 92 |
+
top: -500px;
|
| 93 |
+
width: 100%;
|
| 94 |
+
justify-content: center;
|
| 95 |
+
z-index: var(--layer-top);
|
| 96 |
+
transition: top 0.3s ease-out;
|
| 97 |
+
}
|
| 98 |
+
|
| 99 |
+
#check-chuanhu-update {
|
| 100 |
+
position: absolute;
|
| 101 |
+
align-items: center;
|
| 102 |
+
display: flex;
|
| 103 |
+
flex-direction: column;
|
| 104 |
+
justify-content: center;
|
| 105 |
+
margin: var(--size-6) var(--size-4);
|
| 106 |
+
box-shadow: var(--shadow-drop-lg);
|
| 107 |
+
border: 1px solid var(--block-label-border-color);
|
| 108 |
+
border-radius: var(--container-radius);
|
| 109 |
+
background: var(--background-fill-primary);
|
| 110 |
+
padding: var(--size-4) var(--size-6);
|
| 111 |
+
min-width: 360px;
|
| 112 |
+
max-width: 480px;
|
| 113 |
+
overflow: hidden;
|
| 114 |
+
pointer-events: auto;
|
| 115 |
+
}
|
| 116 |
+
|
| 117 |
+
#version-info-title {
|
| 118 |
+
font-size: 1.2em;
|
| 119 |
+
font-weight: bold;
|
| 120 |
+
text-align: start;
|
| 121 |
+
width: 100%;
|
| 122 |
+
}
|
| 123 |
+
|
| 124 |
+
#release-note-wrap {
|
| 125 |
+
width: 100%;
|
| 126 |
+
max-width: 400px;
|
| 127 |
+
height: 120px;
|
| 128 |
+
border: solid 1px var(--border-color-primary);
|
| 129 |
+
overflow: auto;
|
| 130 |
+
padding: 0 8px;
|
| 131 |
+
}
|
| 132 |
+
|
| 133 |
+
#release-note-wrap.hideK {
|
| 134 |
+
display: none;
|
| 135 |
+
}
|
| 136 |
+
|
| 137 |
+
/*.chatrow {*/
|
| 138 |
+
/* gap: 0 !important;*/
|
| 139 |
+
/*}*/
|
| 140 |
+
|
| 141 |
+
.record-icon svelte-1thnwz {
|
| 142 |
+
display: none !important;
|
| 143 |
+
}
|
| 144 |
+
|
| 145 |
+
.btn, .audio-btn .sm.secondary, .mic-wrap, .audio-btn .sm.tertiary {
|
| 146 |
+
height: 48px !important;
|
| 147 |
+
margin: 0 !important;
|
| 148 |
+
padding: 0 5px 0 !important;
|
| 149 |
+
display: inline-block;
|
| 150 |
+
}
|
| 151 |
+
|
| 152 |
+
.audio-btn {
|
| 153 |
+
border: 0 !important;
|
| 154 |
+
position: relative;
|
| 155 |
+
background: none !important;
|
| 156 |
+
}
|
| 157 |
+
|
| 158 |
+
.mic-wrap {
|
| 159 |
+
position: absolute;
|
| 160 |
+
padding: 0 !important;
|
| 161 |
+
top: 0;
|
| 162 |
+
left: 0;
|
| 163 |
+
z-index: 1;
|
| 164 |
+
}
|
| 165 |
+
|
| 166 |
+
.audio-btn .sm.secondary, .audio-btn .sm.tertiary {
|
| 167 |
+
color: transparent;
|
| 168 |
+
border: none !important;
|
| 169 |
+
}
|
| 170 |
+
|
| 171 |
+
.audio-btn .record-icon {
|
| 172 |
+
display: none !important;
|
| 173 |
+
}
|
| 174 |
+
|
| 175 |
+
.dark .btn-send {
|
| 176 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 20 20"%3E%3Cpath fill="white" d="M2.724 2.053a.5.5 0 0 0-.707.576l1.498 5.618a.5.5 0 0 0 .4.364l6.855 1.142c.279.047.279.447 0 .494l-6.854 1.142a.5.5 0 0 0-.401.364l-1.498 5.618a.5.5 0 0 0 .707.576l15-7.5a.5.5 0 0 0 0-.894l-15-7.5Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 177 |
+
}
|
| 178 |
+
|
| 179 |
+
.dark .btn-clear {
|
| 180 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 16 16"%3E%3Cpath fill="white" fill-rule="evenodd" d="M15.963 7.23A8 8 0 0 1 .044 8.841a.75.75 0 0 1 1.492-.158a6.5 6.5 0 1 0 9.964-6.16V4.25a.75.75 0 0 1-1.5 0V0h4.25a.75.75 0 0 1 0 1.5h-1.586a8.001 8.001 0 0 1 3.299 5.73ZM7 2a1 1 0 1 0 0-2a1 1 0 0 0 0 2Zm-2.25.25a1 1 0 1 1-2 0a1 1 0 0 1 2 0ZM1.5 6a1 1 0 1 0 0-2a1 1 0 0 0 0 2Z" clip-rule="evenodd"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 181 |
+
}
|
| 182 |
+
|
| 183 |
+
.dark .btn-record, .dark .audio-btn .sm.secondary {
|
| 184 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 20 20"%3E%3Cpath fill="white" d="M4.5 10a.5.5 0 0 0-1 0a5.5 5.5 0 0 0 5 5.478V17.5a.5.5 0 0 0 1 0v-.706A5.48 5.48 0 0 1 9 14.5A4.5 4.5 0 0 1 4.5 10ZM12 5v4.6a5.514 5.514 0 0 0-2.79 3.393A3 3 0 0 1 6 10V5a3 3 0 0 1 6 0Zm5 9.5a2.5 2.5 0 1 1-5 0a2.5 2.5 0 0 1 5 0Zm2 0a4.5 4.5 0 1 1-9 0a4.5 4.5 0 0 1 9 0Zm-8 0a3.5 3.5 0 1 0 7 0a3.5 3.5 0 0 0-7 0Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 185 |
+
}
|
| 186 |
+
|
| 187 |
+
.dark .audio-btn .sm.tertiary {
|
| 188 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 24 24"%3E%3Cdefs%3E%3Cfilter id="svgSpinnersGooeyBalls10"%3E%3CfeGaussianBlur in="SourceGraphic" result="y" stdDeviation="1.5"%2F%3E%3CfeColorMatrix in="y" result="z" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7"%2F%3E%3CfeBlend in="SourceGraphic" in2="z"%2F%3E%3C%2Ffilter%3E%3C%2Fdefs%3E%3Cg fill="white" filter="url(%23svgSpinnersGooeyBalls10)"%3E%3Ccircle cx="4" cy="12" r="3"%3E%3Canimate attributeName="cx" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="4%3B9%3B4"%2F%3E%3Canimate attributeName="r" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="3%3B8%3B3"%2F%3E%3C%2Fcircle%3E%3Ccircle cx="15" cy="12" r="8"%3E%3Canimate attributeName="cx" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="15%3B20%3B15"%2F%3E%3Canimate attributeName="r" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="8%3B3%3B8"%2F%3E%3C%2Fcircle%3E%3C%2Fg%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 189 |
+
}
|
| 190 |
+
|
| 191 |
+
.dark .btn-del {
|
| 192 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="24" height="24" viewBox="0 0 24 24"%3E%3Cpath fill="white" d="m18 9l-5-5v3q0 .825.588 1.413T15 9h3Zm0 10.425L16.6 20.8q-.275.275-.688.288T15.2 20.8q-.275-.275-.275-.7t.275-.7l1.4-1.4l-1.4-1.4q-.275-.275-.275-.7t.275-.7q.275-.275.7-.275t.7.275l1.4 1.4l1.4-1.4q.275-.275.688-.287t.712.287q.275.275.275.7t-.275.7L19.425 18l1.375 1.4q.275.275.288.688t-.288.712q-.275.275-.7.275t-.7-.275L18 19.425ZM6 22q-.825 0-1.413-.588T4 20V4q0-.825.588-1.413T6 2h7.175q.4 0 .763.15t.637.425l4.85 4.85q.275.275.425.638t.15.762v3.525q-.475-.175-.988-.263T17.976 12q-2.5 0-4.237 1.738T12 17.974q0 1.125.4 2.163T13.55 22H6Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 193 |
+
}
|
| 194 |
+
|
| 195 |
+
.dark .btn-del-all {
|
| 196 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="24" height="24" viewBox="0 0 24 24"%3E%3Cg fill="none" fill-rule="evenodd"%3E%3Cpath d="M24 0v24H0V0h24ZM12.593 23.258l-.011.002l-.071.035l-.02.004l-.014-.004l-.071-.035c-.01-.004-.019-.001-.024.005l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427c-.002-.01-.009-.017-.017-.018Zm.265-.113l-.013.002l-.185.093l-.01.01l-.003.011l.018.43l.005.012l.008.007l.201.093c.012.004.023 0 .029-.008l.004-.014l-.034-.614c-.003-.012-.01-.02-.02-.022Zm-.715.002a.023.023 0 0 0-.027.006l-.006.014l-.034.614c0 .012.007.02.017.024l.015-.002l.201-.093l.01-.008l.004-.011l.017-.43l-.003-.012l-.01-.01l-.184-.092Z"%2F%3E%3Cpath fill="white" d="M4 3a2 2 0 0 0-2 2v14a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2V7.5a2 2 0 0 0-2-2h-7.52l-1.399-1.75A2 2 0 0 0 9.52 3H4Zm5.172 7.172a1 1 0 0 1 1.414 0L12 11.586l1.414-1.414a1 1 0 1 1 1.414 1.414L13.414 13l1.414 1.414a1 1 0 0 1-1.414 1.414L12 14.414l-1.414 1.414a1 1 0 1 1-1.414-1.414L10.586 13l-1.414-1.414a1 1 0 0 1 0-1.414Z"%2F%3E%3C%2Fg%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 197 |
+
}
|
| 198 |
+
|
| 199 |
+
.btn-send {
|
| 200 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 20 20"%3E%3Cpath fill="%232685b5" d="M2.724 2.053a.5.5 0 0 0-.707.576l1.498 5.618a.5.5 0 0 0 .4.364l6.855 1.142c.279.047.279.447 0 .494l-6.854 1.142a.5.5 0 0 0-.401.364l-1.498 5.618a.5.5 0 0 0 .707.576l15-7.5a.5.5 0 0 0 0-.894l-15-7.5Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 201 |
+
}
|
| 202 |
+
|
| 203 |
+
.btn-clear {
|
| 204 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 16 16"%3E%3Cpath fill="%232685b5" fill-rule="evenodd" d="M15.963 7.23A8 8 0 0 1 .044 8.841a.75.75 0 0 1 1.492-.158a6.5 6.5 0 1 0 9.964-6.16V4.25a.75.75 0 0 1-1.5 0V0h4.25a.75.75 0 0 1 0 1.5h-1.586a8.001 8.001 0 0 1 3.299 5.73ZM7 2a1 1 0 1 0 0-2a1 1 0 0 0 0 2Zm-2.25.25a1 1 0 1 1-2 0a1 1 0 0 1 2 0ZM1.5 6a1 1 0 1 0 0-2a1 1 0 0 0 0 2Z" clip-rule="evenodd"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 205 |
+
}
|
| 206 |
+
|
| 207 |
+
.btn-record, .audio-btn .sm.secondary {
|
| 208 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="20" height="20" viewBox="0 0 20 20"%3E%3Cpath fill="%232685b5" d="M4.5 10a.5.5 0 0 0-1 0a5.5 5.5 0 0 0 5 5.478V17.5a.5.5 0 0 0 1 0v-.706A5.48 5.48 0 0 1 9 14.5A4.5 4.5 0 0 1 4.5 10ZM12 5v4.6a5.514 5.514 0 0 0-2.79 3.393A3 3 0 0 1 6 10V5a3 3 0 0 1 6 0Zm5 9.5a2.5 2.5 0 1 1-5 0a2.5 2.5 0 0 1 5 0Zm2 0a4.5 4.5 0 1 1-9 0a4.5 4.5 0 0 1 9 0Zm-8 0a3.5 3.5 0 1 0 7 0a3.5 3.5 0 0 0-7 0Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 209 |
+
}
|
| 210 |
+
|
| 211 |
+
.audio-btn .sm.tertiary {
|
| 212 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="24" height="24" viewBox="0 0 24 24"%3E%3Cdefs%3E%3Cfilter id="svgSpinnersGooeyBalls10"%3E%3CfeGaussianBlur in="SourceGraphic" result="y" stdDeviation="1.5"%2F%3E%3CfeColorMatrix in="y" result="z" values="1 0 0 0 0 0 1 0 0 0 0 0 1 0 0 0 0 0 18 -7"%2F%3E%3CfeBlend in="SourceGraphic" in2="z"%2F%3E%3C%2Ffilter%3E%3C%2Fdefs%3E%3Cg fill="%232685b5" filter="url(%23svgSpinnersGooeyBalls10)"%3E%3Ccircle cx="4" cy="12" r="3"%3E%3Canimate attributeName="cx" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="4%3B9%3B4"%2F%3E%3Canimate attributeName="r" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="3%3B8%3B3"%2F%3E%3C%2Fcircle%3E%3Ccircle cx="15" cy="12" r="8"%3E%3Canimate attributeName="cx" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="15%3B20%3B15"%2F%3E%3Canimate attributeName="r" calcMode="spline" dur="0.75s" keySplines=".56%2C.52%2C.17%2C.98%3B.56%2C.52%2C.17%2C.98" repeatCount="indefinite" values="8%3B3%3B8"%2F%3E%3C%2Fcircle%3E%3C%2Fg%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 213 |
+
}
|
| 214 |
+
|
| 215 |
+
.btn-del {
|
| 216 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="24" height="24" viewBox="0 0 24 24"%3E%3Cpath fill="%232685b5" d="m18 9l-5-5v3q0 .825.588 1.413T15 9h3Zm0 10.425L16.6 20.8q-.275.275-.688.288T15.2 20.8q-.275-.275-.275-.7t.275-.7l1.4-1.4l-1.4-1.4q-.275-.275-.275-.7t.275-.7q.275-.275.7-.275t.7.275l1.4 1.4l1.4-1.4q.275-.275.688-.287t.712.287q.275.275.275.7t-.275.7L19.425 18l1.375 1.4q.275.275.288.688t-.288.712q-.275.275-.7.275t-.7-.275L18 19.425ZM6 22q-.825 0-1.413-.588T4 20V4q0-.825.588-1.413T6 2h7.175q.4 0 .763.15t.637.425l4.85 4.85q.275.275.425.638t.15.762v3.525q-.475-.175-.988-.263T17.976 12q-2.5 0-4.237 1.738T12 17.974q0 1.125.4 2.163T13.55 22H6Z"%2F%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 217 |
+
}
|
| 218 |
+
|
| 219 |
+
.btn-del-all {
|
| 220 |
+
background: url('data:image/svg+xml,%3Csvg xmlns="http%3A%2F%2Fwww.w3.org%2F2000%2Fsvg" width="24" height="24" viewBox="0 0 24 24"%3E%3Cg fill="none" fill-rule="evenodd"%3E%3Cpath d="M24 0v24H0V0h24ZM12.593 23.258l-.011.002l-.071.035l-.02.004l-.014-.004l-.071-.035c-.01-.004-.019-.001-.024.005l-.004.01l-.017.428l.005.02l.01.013l.104.074l.015.004l.012-.004l.104-.074l.012-.016l.004-.017l-.017-.427c-.002-.01-.009-.017-.017-.018Zm.265-.113l-.013.002l-.185.093l-.01.01l-.003.011l.018.43l.005.012l.008.007l.201.093c.012.004.023 0 .029-.008l.004-.014l-.034-.614c-.003-.012-.01-.02-.02-.022Zm-.715.002a.023.023 0 0 0-.027.006l-.006.014l-.034.614c0 .012.007.02.017.024l.015-.002l.201-.093l.01-.008l.004-.011l.017-.43l-.003-.012l-.01-.01l-.184-.092Z"%2F%3E%3Cpath fill="%232685b5" d="M4 3a2 2 0 0 0-2 2v14a2 2 0 0 0 2 2h16a2 2 0 0 0 2-2V7.5a2 2 0 0 0-2-2h-7.52l-1.399-1.75A2 2 0 0 0 9.52 3H4Zm5.172 7.172a1 1 0 0 1 1.414 0L12 11.586l1.414-1.414a1 1 0 1 1 1.414 1.414L13.414 13l1.414 1.414a1 1 0 0 1-1.414 1.414L12 14.414l-1.414 1.414a1 1 0 1 1-1.414-1.414L10.586 13l-1.414-1.414a1 1 0 0 1 0-1.414Z"%2F%3E%3C%2Fg%3E%3C%2Fsvg%3E') center no-repeat !important;
|
| 221 |
+
}
|
| 222 |
+
|
| 223 |
+
|
| 224 |
+
.tooltip-btn, .audio-btn .sm.secondary, .audio-btn .sm.tertiary {
|
| 225 |
+
position: relative;
|
| 226 |
+
display: inline-block;
|
| 227 |
+
padding: 10px 20px;
|
| 228 |
+
border: 1px solid #ddd;
|
| 229 |
+
background-color: #f9f9f9;
|
| 230 |
+
cursor: pointer;
|
| 231 |
+
}
|
| 232 |
+
|
| 233 |
+
.tooltip-btn::after,
|
| 234 |
+
.tooltip-btn::before,
|
| 235 |
+
.audio-btn .sm.secondary::after,
|
| 236 |
+
.audio-btn .sm.secondary::before,
|
| 237 |
+
.audio-btn .sm.tertiary::after,
|
| 238 |
+
.audio-btn .sm.tertiary::before {
|
| 239 |
+
content: "";
|
| 240 |
+
position: absolute;
|
| 241 |
+
visibility: hidden;
|
| 242 |
+
top: 100%;
|
| 243 |
+
left: 50%;
|
| 244 |
+
transform: translateX(-50%);
|
| 245 |
+
transition: opacity 0.3s;
|
| 246 |
+
pointer-events: none;
|
| 247 |
+
}
|
| 248 |
+
|
| 249 |
+
.tooltip-content-send::after {
|
| 250 |
+
content: "Send message";
|
| 251 |
+
}
|
| 252 |
+
|
| 253 |
+
.tooltip-content-record::after, .audio-btn .sm.secondary::after {
|
| 254 |
+
content: "Use microphone";
|
| 255 |
+
}
|
| 256 |
+
|
| 257 |
+
.audio-btn .sm.tertiary::after {
|
| 258 |
+
content: "Stop recording";
|
| 259 |
+
}
|
| 260 |
+
|
| 261 |
+
.tooltip-content-clear::after {
|
| 262 |
+
content: "New topic";
|
| 263 |
+
}
|
| 264 |
+
|
| 265 |
+
.tooltip-del:after {
|
| 266 |
+
content: "Remove selected files";
|
| 267 |
+
}
|
| 268 |
+
|
| 269 |
+
.tooltip-del-all::after {
|
| 270 |
+
content: "Remove all files";
|
| 271 |
+
}
|
| 272 |
+
|
| 273 |
+
.tooltip-btn::after, .audio-btn .sm.secondary::after, .audio-btn .sm.tertiary::after {
|
| 274 |
+
background-color: #000;
|
| 275 |
+
color: #fff;
|
| 276 |
+
text-align: center;
|
| 277 |
+
font-size: 12px;
|
| 278 |
+
font-weight: bold;
|
| 279 |
+
padding: 5px;
|
| 280 |
+
border-radius: 6px;
|
| 281 |
+
z-index: 1;
|
| 282 |
+
white-space: nowrap;
|
| 283 |
+
opacity: 0;
|
| 284 |
+
margin-top: 10px; /* Spacing between the button and the tooltip */
|
| 285 |
+
}
|
| 286 |
+
|
| 287 |
+
.tooltip-btn::before, .audio-btn .sm.secondary::before, .audio-btn .sm.tertiary::before {
|
| 288 |
+
border: 5px solid transparent;
|
| 289 |
+
border-bottom-color: #000; /* Arrow color */
|
| 290 |
+
}
|
| 291 |
+
|
| 292 |
+
.tooltip-btn:hover::after,
|
| 293 |
+
.tooltip-btn:hover::before,
|
| 294 |
+
.audio-btn .sm.secondary:hover::after,
|
| 295 |
+
.audio-btn .sm.secondary:hover::before,
|
| 296 |
+
.audio-btn .sm.tertiary:hover::after,
|
| 297 |
+
.audio-btn .sm.tertiary:hover::before {
|
| 298 |
+
visibility: visible;
|
| 299 |
+
opacity: 0.8; /* Arrow and tooltip opacity */
|
| 300 |
+
}
|
| 301 |
+
|
| 302 |
+
|
| 303 |
+
.btn-update-group {
|
| 304 |
+
display: flex;
|
| 305 |
+
justify-content: space-evenly;
|
| 306 |
+
align-items: center;
|
| 307 |
+
width: 100%;
|
| 308 |
+
padding-top: 10px;
|
| 309 |
+
}
|
| 310 |
+
|
| 311 |
+
.btn-update-group.hideK {
|
| 312 |
+
display: none;
|
| 313 |
+
}
|
| 314 |
+
|
| 315 |
+
/* user_info */
|
| 316 |
+
#user_info.block {
|
| 317 |
+
white-space: nowrap;
|
| 318 |
+
position: absolute;
|
| 319 |
+
left: 13em;
|
| 320 |
+
top: -1em;
|
| 321 |
+
z-index: var(--layer-2);
|
| 322 |
+
box-shadow: var(--block-shadow);
|
| 323 |
+
border: none !important;
|
| 324 |
+
border-radius: 10px 10px 10px 0;
|
| 325 |
+
background: var(--color-accent);
|
| 326 |
+
padding: var(--block-label-padding);
|
| 327 |
+
font-size: var(--block-label-text-size);
|
| 328 |
+
line-height: var(--line-sm);
|
| 329 |
+
width: auto;
|
| 330 |
+
max-height: 30px !important;
|
| 331 |
+
opacity: 1;
|
| 332 |
+
transition: opacity 0.3s ease-in-out;
|
| 333 |
+
}
|
| 334 |
+
|
| 335 |
+
#user_info.block .wrap {
|
| 336 |
+
opacity: 0;
|
| 337 |
+
}
|
| 338 |
+
|
| 339 |
+
#user_info p {
|
| 340 |
+
color: white;
|
| 341 |
+
font-weight: var(--block-label-text-weight);
|
| 342 |
+
}
|
| 343 |
+
|
| 344 |
+
#user_info.hideK {
|
| 345 |
+
opacity: 0;
|
| 346 |
+
transition: opacity 1s ease-in-out;
|
| 347 |
+
}
|
| 348 |
+
|
| 349 |
+
/* status_display */
|
| 350 |
+
#status_display {
|
| 351 |
+
margin-bottom: 10px;
|
| 352 |
+
display: flex;
|
| 353 |
+
min-height: 2em;
|
| 354 |
+
align-items: flex-end;
|
| 355 |
+
justify-content: flex-end;
|
| 356 |
+
}
|
| 357 |
+
|
| 358 |
+
#status_display p {
|
| 359 |
+
font-size: .85em;
|
| 360 |
+
font-family: ui-monospace, "SF Mono", "SFMono-Regular", "Menlo", "Consolas", "Liberation Mono", "Microsoft Yahei UI", "Microsoft Yahei", monospace;
|
| 361 |
+
color: var(--body-text-color-subdued);
|
| 362 |
+
}
|
| 363 |
+
|
| 364 |
+
#status_display {
|
| 365 |
+
transition: all 0.6s;
|
| 366 |
+
}
|
| 367 |
+
|
| 368 |
+
#chuanhu_chatbot {
|
| 369 |
+
transition: height 0.3s ease;
|
| 370 |
+
}
|
| 371 |
+
|
| 372 |
+
/* usage_display */
|
| 373 |
+
.insert_block {
|
| 374 |
+
position: relative;
|
| 375 |
+
margin: 0;
|
| 376 |
+
padding: 8px 12px;
|
| 377 |
+
box-shadow: var(--block-shadow);
|
| 378 |
+
border-width: var(--block-border-width);
|
| 379 |
+
border-color: var(--block-border-color);
|
| 380 |
+
border-radius: var(--block-radius);
|
| 381 |
+
background: var(--block-background-fill);
|
| 382 |
+
width: 100%;
|
| 383 |
+
line-height: var(--line-sm);
|
| 384 |
+
min-height: 2em;
|
| 385 |
+
}
|
| 386 |
+
|
| 387 |
+
#usage_display p, #usage_display span {
|
| 388 |
+
margin: 0;
|
| 389 |
+
font-size: .85em;
|
| 390 |
+
color: var(--body-text-color-subdued);
|
| 391 |
+
}
|
| 392 |
+
|
| 393 |
+
.progress-bar {
|
| 394 |
+
background-color: var(--input-background-fill);
|
| 395 |
+
margin: .5em 0 !important;
|
| 396 |
+
height: 20px;
|
| 397 |
+
border-radius: 10px;
|
| 398 |
+
overflow: hidden;
|
| 399 |
+
}
|
| 400 |
+
|
| 401 |
+
.progress {
|
| 402 |
+
background-color: var(--block-title-background-fill);
|
| 403 |
+
height: 100%;
|
| 404 |
+
border-radius: 10px;
|
| 405 |
+
text-align: right;
|
| 406 |
+
transition: width 0.5s ease-in-out;
|
| 407 |
+
}
|
| 408 |
+
|
| 409 |
+
.progress-text {
|
| 410 |
+
/* color: white; */
|
| 411 |
+
display: none !important;
|
| 412 |
+
/*color: var(--color-accent) !important;*/
|
| 413 |
+
/*font-size: 1em !important;*/
|
| 414 |
+
/*font-weight: bold;*/
|
| 415 |
+
/*padding-right: 10px;*/
|
| 416 |
+
/*line-height: 20px;*/
|
| 417 |
+
}
|
| 418 |
+
|
| 419 |
+
/* 亮暗色模式切换 */
|
| 420 |
+
#apSwitch input[type="checkbox"] {
|
| 421 |
+
margin: 0 !important;
|
| 422 |
+
}
|
| 423 |
+
|
| 424 |
+
#apSwitch label.apSwitch {
|
| 425 |
+
display: flex;
|
| 426 |
+
align-items: center;
|
| 427 |
+
cursor: pointer;
|
| 428 |
+
color: var(--body-text-color);
|
| 429 |
+
font-weight: var(--checkbox-label-text-weight);
|
| 430 |
+
font-size: var(--checkbox-label-text-size);
|
| 431 |
+
line-height: var(--line-md);
|
| 432 |
+
margin: 2px 0 !important;
|
| 433 |
+
}
|
| 434 |
+
|
| 435 |
+
input[type="checkbox"]#apSwitch_checkbox::before {
|
| 436 |
+
background: none !important;
|
| 437 |
+
content: '🌞';
|
| 438 |
+
border: none !important;
|
| 439 |
+
box-shadow: none !important;
|
| 440 |
+
font-size: 22px;
|
| 441 |
+
top: -4.4px;
|
| 442 |
+
left: -1px;
|
| 443 |
+
}
|
| 444 |
+
|
| 445 |
+
input:checked[type="checkbox"]#apSwitch_checkbox::before {
|
| 446 |
+
content: '🌚';
|
| 447 |
+
left: 16px;
|
| 448 |
+
}
|
| 449 |
+
|
| 450 |
+
/* .apSwitch {
|
| 451 |
+
top: 2px;
|
| 452 |
+
display: inline-block;
|
| 453 |
+
height: 22px;
|
| 454 |
+
position: relative;
|
| 455 |
+
width: 40px;
|
| 456 |
+
border-radius: 11px;
|
| 457 |
+
box-shadow: inset 0 0 1px 0 rgba(0,0,0,0.05), inset 0 0 2px 0 rgba(0,0,0,0.08) !important;
|
| 458 |
+
}
|
| 459 |
+
.apSwitch input {
|
| 460 |
+
display: none !important;
|
| 461 |
+
}
|
| 462 |
+
.apSlider {
|
| 463 |
+
background-color: var(--neutral-200);
|
| 464 |
+
bottom: 0;
|
| 465 |
+
cursor: pointer;
|
| 466 |
+
left: 0;
|
| 467 |
+
position: absolute;
|
| 468 |
+
right: 0;
|
| 469 |
+
top: 0;
|
| 470 |
+
transition: .4s;
|
| 471 |
+
font-size: 22px;
|
| 472 |
+
border-radius: 11px;
|
| 473 |
+
}
|
| 474 |
+
.apSlider::before {
|
| 475 |
+
transform: scale(0.9);
|
| 476 |
+
position: absolute;
|
| 477 |
+
transition: .4s;
|
| 478 |
+
content: "🌞";
|
| 479 |
+
}
|
| 480 |
+
input:checked + .apSlider {
|
| 481 |
+
background-color: var(--primary-600);
|
| 482 |
+
}
|
| 483 |
+
input:checked + .apSlider::before {
|
| 484 |
+
transform: translateX(18px);
|
| 485 |
+
content:"🌚";
|
| 486 |
+
} */
|
| 487 |
+
|
| 488 |
+
.switch_checkbox label {
|
| 489 |
+
flex-direction: row-reverse;
|
| 490 |
+
justify-content: space-between;
|
| 491 |
+
}
|
| 492 |
+
|
| 493 |
+
.switch_checkbox input[type="checkbox"] + span {
|
| 494 |
+
margin-left: 0 !important;
|
| 495 |
+
}
|
| 496 |
+
|
| 497 |
+
.switch_checkbox input[type="checkbox"] {
|
| 498 |
+
-moz-appearance: none;
|
| 499 |
+
appearance: none;
|
| 500 |
+
-webkit-appearance: none;
|
| 501 |
+
outline: none;
|
| 502 |
+
}
|
| 503 |
+
|
| 504 |
+
.switch_checkbox input[type="checkbox"] {
|
| 505 |
+
display: inline-block !important;
|
| 506 |
+
position: relative !important;
|
| 507 |
+
border: none !important;
|
| 508 |
+
outline: none;
|
| 509 |
+
width: 40px !important;
|
| 510 |
+
height: 22px !important;
|
| 511 |
+
border-radius: 11px !important;
|
| 512 |
+
background-image: none !important;
|
| 513 |
+
box-shadow: inset 0 0 1px 0 rgba(0, 0, 0, 0.05), inset 0 0 2px 0 rgba(0, 0, 0, 0.08) !important;
|
| 514 |
+
background-color: var(--switch-checkbox-color-light) !important;
|
| 515 |
+
transition: .2s ease background-color;
|
| 516 |
+
}
|
| 517 |
+
|
| 518 |
+
.dark .switch_checkbox input[type="checkbox"] {
|
| 519 |
+
background-color: var(--switch-checkbox-color-light) !important;
|
| 520 |
+
}
|
| 521 |
+
|
| 522 |
+
.switch_checkbox input[type="checkbox"]::before {
|
| 523 |
+
content: "";
|
| 524 |
+
position: absolute;
|
| 525 |
+
width: 22px;
|
| 526 |
+
height: 22px;
|
| 527 |
+
top: 0;
|
| 528 |
+
left: 0;
|
| 529 |
+
background: #FFFFFF;
|
| 530 |
+
border: 0.5px solid rgba(0, 0, 0, 0.02);
|
| 531 |
+
box-shadow: 0 0 0 0 rgba(0, 0, 0, 0.15), 0 1px 0 0 rgba(0, 0, 0, 0.05);
|
| 532 |
+
transform: scale(0.9);
|
| 533 |
+
border-radius: 11px !important;
|
| 534 |
+
transition: .4s ease all;
|
| 535 |
+
box-shadow: var(--input-shadow);
|
| 536 |
+
}
|
| 537 |
+
|
| 538 |
+
.switch_checkbox input:checked[type="checkbox"] {
|
| 539 |
+
background-color: var(--switch-checkbox-marked-color) !important;
|
| 540 |
+
}
|
| 541 |
+
|
| 542 |
+
.switch_checkbox input:checked[type="checkbox"]::before {
|
| 543 |
+
background-color: #fff;
|
| 544 |
+
left: 18px;
|
| 545 |
+
}
|
| 546 |
+
|
| 547 |
+
/* Override Slider Styles (for webkit browsers like Safari and Chrome)
|
| 548 |
+
* 好希望这份提案能早日实现 https://github.com/w3c/csswg-drafts/issues/4410
|
| 549 |
+
* 进度滑块在各个平台还是太不统一了
|
| 550 |
+
*/
|
| 551 |
+
|
| 552 |
+
/* input[type="range"] {
|
| 553 |
+
-webkit-appearance: none;
|
| 554 |
+
height: 4px;
|
| 555 |
+
background: var(--input-background-fill);
|
| 556 |
+
border-radius: 5px;
|
| 557 |
+
background-image: linear-gradient(var(--primary-500),var(--primary-500));
|
| 558 |
+
background-size: 0% 100%;
|
| 559 |
+
background-repeat: no-repeat;
|
| 560 |
+
} */
|
| 561 |
+
input[type="range"] {
|
| 562 |
+
height: 4px;
|
| 563 |
+
border-radius: 5px;
|
| 564 |
+
}
|
| 565 |
+
|
| 566 |
+
input[type="range"]::-webkit-slider-thumb {
|
| 567 |
+
-webkit-appearance: none;
|
| 568 |
+
height: 20px;
|
| 569 |
+
width: 20px;
|
| 570 |
+
border-radius: 50%;
|
| 571 |
+
border: solid 0.5px #ddd;
|
| 572 |
+
background-color: white;
|
| 573 |
+
cursor: ew-resize;
|
| 574 |
+
box-shadow: var(--input-shadow);
|
| 575 |
+
transition: background-color .1s ease;
|
| 576 |
+
}
|
| 577 |
+
|
| 578 |
+
input[type="range"]::-webkit-slider-thumb:hover {
|
| 579 |
+
background: var(--neutral-50);
|
| 580 |
+
}
|
| 581 |
+
|
| 582 |
+
input[type=range]::-webkit-slider-runnable-track {
|
| 583 |
+
-webkit-appearance: none;
|
| 584 |
+
box-shadow: none;
|
| 585 |
+
border: none;
|
| 586 |
+
background: transparent;
|
| 587 |
+
}
|
| 588 |
+
|
| 589 |
+
hr.append-display {
|
| 590 |
+
margin: 8px 0;
|
| 591 |
+
border: none;
|
| 592 |
+
height: 1px;
|
| 593 |
+
border-top-width: 0;
|
| 594 |
+
background-image: linear-gradient(to right, rgba(50, 50, 50, 0.1), rgba(150, 150, 150, 0.8), rgba(50, 50, 50, 0.1));
|
| 595 |
+
}
|
| 596 |
+
|
| 597 |
+
.source-a {
|
| 598 |
+
font-size: 0.8em;
|
| 599 |
+
max-width: 100%;
|
| 600 |
+
margin: 0;
|
| 601 |
+
display: flex;
|
| 602 |
+
flex-direction: row;
|
| 603 |
+
flex-wrap: wrap;
|
| 604 |
+
align-items: center;
|
| 605 |
+
/* background-color: #dddddd88; */
|
| 606 |
+
border-radius: 1.5rem;
|
| 607 |
+
padding: 0.2em;
|
| 608 |
+
}
|
| 609 |
+
|
| 610 |
+
.source-a a, .source-a details {
|
| 611 |
+
display: inline-block;
|
| 612 |
+
background-color: #aaaaaa50;
|
| 613 |
+
border-radius: 1rem;
|
| 614 |
+
padding: 0.5em;
|
| 615 |
+
text-align: center;
|
| 616 |
+
text-overflow: ellipsis;
|
| 617 |
+
overflow: hidden;
|
| 618 |
+
min-width: 40%;
|
| 619 |
+
white-space: nowrap;
|
| 620 |
+
margin: 0.2rem 0.1rem;
|
| 621 |
+
text-decoration: none !important;
|
| 622 |
+
flex: 1;
|
| 623 |
+
transition: flex 0.5s;
|
| 624 |
+
}
|
| 625 |
+
|
| 626 |
+
.source-a details > p {
|
| 627 |
+
background-color: #aaaaaa50;
|
| 628 |
+
border-radius: 1rem;
|
| 629 |
+
padding: 0.5em;
|
| 630 |
+
text-overflow: ellipsis;
|
| 631 |
+
text-align: left !important;
|
| 632 |
+
overflow: hidden;
|
| 633 |
+
white-space: pre !important;
|
| 634 |
+
margin: 0.2rem 0.1rem;
|
| 635 |
+
text-decoration: none !important;
|
| 636 |
+
flex: 1;
|
| 637 |
+
transition: flex 0.5s;
|
| 638 |
+
}
|
| 639 |
+
|
| 640 |
+
.source-a a:hover, .source-a details:hover {
|
| 641 |
+
background-color: #aaaaaa20;
|
| 642 |
+
flex: 2;
|
| 643 |
+
}
|
| 644 |
+
|
| 645 |
+
#submit_btn, #cancel_btn {
|
| 646 |
+
height: 42px !important;
|
| 647 |
+
}
|
| 648 |
+
|
| 649 |
+
#submit_btn::before {
|
| 650 |
+
content: url("data:image/svg+xml, %3Csvg width='21px' height='20px' viewBox='0 0 21 20' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='page' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cg id='send' transform='translate(0.435849, 0.088463)' fill='%23FFFFFF' fill-rule='nonzero'%3E %3Cpath d='M0.579148261,0.0428666046 C0.301105539,-0.0961547561 -0.036517765,0.122307382 0.0032026237,0.420210298 L1.4927172,18.1553639 C1.5125774,18.4334066 1.79062012,18.5922882 2.04880264,18.4929872 L8.24518329,15.8913017 L11.6412765,19.7441794 C11.8597387,19.9825018 12.2370824,19.8832008 12.3165231,19.5852979 L13.9450591,13.4882182 L19.7839562,11.0255541 C20.0619989,10.8865327 20.0818591,10.4694687 19.7839562,10.3105871 L0.579148261,0.0428666046 Z M11.6138902,17.0883151 L9.85385903,14.7195502 L0.718169621,0.618812241 L12.69945,12.9346347 L11.6138902,17.0883151 Z' id='shape'%3E%3C/path%3E %3C/g%3E %3C/g%3E %3C/svg%3E");
|
| 651 |
+
height: 21px;
|
| 652 |
+
}
|
| 653 |
+
|
| 654 |
+
#cancel_btn::before {
|
| 655 |
+
content: url("data:image/svg+xml,%3Csvg width='21px' height='21px' viewBox='0 0 21 21' version='1.1' xmlns='http://www.w3.org/2000/svg' xmlns:xlink='http://www.w3.org/1999/xlink'%3E %3Cg id='pg' stroke='none' stroke-width='1' fill='none' fill-rule='evenodd'%3E %3Cpath d='M10.2072007,20.088463 C11.5727865,20.088463 12.8594566,19.8259823 14.067211,19.3010209 C15.2749653,18.7760595 16.3386126,18.0538087 17.2581528,17.1342685 C18.177693,16.2147282 18.8982283,15.1527965 19.4197586,13.9484733 C19.9412889,12.7441501 20.202054,11.4557644 20.202054,10.0833163 C20.202054,8.71773046 19.9395733,7.43106036 19.4146119,6.22330603 C18.8896505,5.01555169 18.1673997,3.95018885 17.2478595,3.0272175 C16.3283192,2.10424615 15.2646719,1.3837109 14.0569176,0.865611739 C12.8491633,0.34751258 11.5624932,0.088463 10.1969073,0.088463 C8.83132146,0.088463 7.54636692,0.34751258 6.34204371,0.865611739 C5.1377205,1.3837109 4.07407321,2.10424615 3.15110186,3.0272175 C2.22813051,3.95018885 1.5058797,5.01555169 0.984349419,6.22330603 C0.46281914,7.43106036 0.202054,8.71773046 0.202054,10.0833163 C0.202054,11.4557644 0.4645347,12.7441501 0.9894961,13.9484733 C1.5144575,15.1527965 2.23670831,16.2147282 3.15624854,17.1342685 C4.07578877,18.0538087 5.1377205,18.7760595 6.34204371,19.3010209 C7.54636692,19.8259823 8.83475258,20.088463 10.2072007,20.088463 Z M10.2072007,18.2562448 C9.07493099,18.2562448 8.01471483,18.0452309 7.0265522,17.6232031 C6.03838956,17.2011753 5.17031614,16.6161693 4.42233192,15.8681851 C3.6743477,15.1202009 3.09105726,14.2521274 2.67246059,13.2639648 C2.25386392,12.2758022 2.04456558,11.215586 2.04456558,10.0833163 C2.04456558,8.95104663 2.25386392,7.89083047 2.67246059,6.90266784 C3.09105726,5.9145052 3.6743477,5.04643178 4.42233192,4.29844756 C5.17031614,3.55046334 6.036674,2.9671729 7.02140552,2.54857623 C8.00613703,2.12997956 9.06463763,1.92068122 10.1969073,1.92068122 C11.329177,1.92068122 12.3911087,2.12997956 13.3827025,2.54857623 C14.3742962,2.9671729 
15.2440852,3.55046334 15.9920694,4.29844756 C16.7400537,5.04643178 17.3233441,5.9145052 17.7419408,6.90266784 C18.1605374,7.89083047 18.3698358,8.95104663 18.3698358,10.0833163 C18.3698358,11.215586 18.1605374,12.2758022 17.7419408,13.2639648 C17.3233441,14.2521274 16.7400537,15.1202009 15.9920694,15.8681851 C15.2440852,16.6161693 14.3760118,17.2011753 13.3878492,17.6232031 C12.3996865,18.0452309 11.3394704,18.2562448 10.2072007,18.2562448 Z M7.65444721,13.6242324 L12.7496608,13.6242324 C13.0584616,13.6242324 13.3003556,13.5384544 13.4753427,13.3668984 C13.6503299,13.1953424 13.7378234,12.9585951 13.7378234,12.6566565 L13.7378234,7.49968276 C13.7378234,7.19774418 13.6503299,6.96099688 13.4753427,6.78944087 C13.3003556,6.61788486 13.0584616,6.53210685 12.7496608,6.53210685 L7.65444721,6.53210685 C7.33878414,6.53210685 7.09345904,6.61788486 6.91847191,6.78944087 C6.74348478,6.96099688 6.65599121,7.19774418 6.65599121,7.49968276 L6.65599121,12.6566565 C6.65599121,12.9585951 6.74348478,13.1953424 6.91847191,13.3668984 C7.09345904,13.5384544 7.33878414,13.6242324 7.65444721,13.6242324 Z' id='shape' fill='%23FF3B30' fill-rule='nonzero'%3E%3C/path%3E %3C/g%3E %3C/svg%3E");
|
| 656 |
+
height: 21px;
|
| 657 |
+
}
|
| 658 |
+
|
| 659 |
+
/* list */
|
| 660 |
+
ol:not(.options), ul:not(.options) {
|
| 661 |
+
padding-inline-start: 2em !important;
|
| 662 |
+
}
|
| 663 |
+
|
| 664 |
+
/* 亮色(默认) */
|
| 665 |
+
#chuanhu_chatbot {
|
| 666 |
+
background-color: var(--chatbot-background-color-light) !important;
|
| 667 |
+
color: var(--chatbot-color-light) !important;
|
| 668 |
+
}
|
| 669 |
+
|
| 670 |
+
[data-testid = "bot"] {
|
| 671 |
+
background: var(--message-bot-background-color-light) !important;
|
| 672 |
+
box-shadow: var(--cib-shadow-card) !important;
|
| 673 |
+
outline: transparent solid 1px !important;
|
| 674 |
+
}
|
| 675 |
+
|
| 676 |
+
[data-testid = "user"] {
|
| 677 |
+
background: linear-gradient(130deg, #2685b5 20%, #135a7f 77.5%) !important;
|
| 678 |
+
color: white !important;
|
| 679 |
+
box-shadow: var(--cib-shadow-card) !important;
|
| 680 |
+
outline: transparent solid 1px !important;
|
| 681 |
+
}
|
| 682 |
+
|
| 683 |
+
/* 暗色 */
|
| 684 |
+
.dark #chuanhu_chatbot {
|
| 685 |
+
background-color: var(--chatbot-background-color-dark) !important;
|
| 686 |
+
color: var(--chatbot-color-dark) !important;
|
| 687 |
+
}
|
| 688 |
+
|
| 689 |
+
.dark [data-testid = "bot"] {
|
| 690 |
+
background-color: var(--message-bot-background-color-dark) !important;
|
| 691 |
+
}
|
| 692 |
+
|
| 693 |
+
.dark [data-testid = "user"] {
|
| 694 |
+
background: linear-gradient(130deg, #2685b5 20%, #135a7f 77.5%) !important;
|
| 695 |
+
}
|
| 696 |
+
|
| 697 |
+
/* 屏幕宽度大于等于500px的设备 */
|
| 698 |
+
/* update on 2023.4.8: 高度的细致调整已写入JavaScript */
|
| 699 |
+
@media screen and (min-width: 500px) {
|
| 700 |
+
#chuanhu_chatbot {
|
| 701 |
+
height: calc(100vh - 200px);
|
| 702 |
+
}
|
| 703 |
+
|
| 704 |
+
#chuanhu_chatbot > .wrapper > .wrap {
|
| 705 |
+
max-height: calc(100vh - 200px - var(--line-sm) * 1rem - 2 * var(--block-label-margin));
|
| 706 |
+
}
|
| 707 |
+
}
|
| 708 |
+
|
| 709 |
+
/* 屏幕宽度小于500px的设备 */
|
| 710 |
+
@media screen and (max-width: 499px) {
|
| 711 |
+
#chuanhu_chatbot {
|
| 712 |
+
height: calc(100vh - 140px);
|
| 713 |
+
}
|
| 714 |
+
|
| 715 |
+
#chuanhu_chatbot > .wrapper > .wrap {
|
| 716 |
+
max-height: calc(100vh - 140px - var(--line-sm) * 1rem - 2 * var(--block-label-margin));
|
| 717 |
+
}
|
| 718 |
+
|
| 719 |
+
[data-testid = "bot"] {
|
| 720 |
+
max-width: 95% !important;
|
| 721 |
+
}
|
| 722 |
+
|
| 723 |
+
#app_title h1 {
|
| 724 |
+
letter-spacing: -1px;
|
| 725 |
+
font-size: 22px;
|
| 726 |
+
}
|
| 727 |
+
}
|
| 728 |
+
|
| 729 |
+
#chuanhu_chatbot > .wrapper > .wrap {
|
| 730 |
+
overflow-x: hidden;
|
| 731 |
+
}
|
| 732 |
+
|
| 733 |
+
/* 对话气泡 */
|
| 734 |
+
.message {
|
| 735 |
+
border-radius: var(--radius-xl) !important;
|
| 736 |
+
border: none;
|
| 737 |
+
padding: var(--spacing-xl) !important;
|
| 738 |
+
font-size: var(--message-font-size) !important;
|
| 739 |
+
line-height: var(--line-md) !important;
|
| 740 |
+
min-height: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl));
|
| 741 |
+
min-width: calc(var(--text-md) * var(--line-md) + 2 * var(--spacing-xl));
|
| 742 |
+
}
|
| 743 |
+
|
| 744 |
+
[data-testid = "bot"] {
|
| 745 |
+
max-width: 85%;
|
| 746 |
+
border-bottom-left-radius: 0 !important;
|
| 747 |
+
}
|
| 748 |
+
|
| 749 |
+
[data-testid = "user"] {
|
| 750 |
+
max-width: 85%;
|
| 751 |
+
width: auto !important;
|
| 752 |
+
border-bottom-right-radius: 0 !important;
|
| 753 |
+
}
|
| 754 |
+
|
| 755 |
+
.message.user p {
|
| 756 |
+
white-space: pre-wrap;
|
| 757 |
+
}
|
| 758 |
+
|
| 759 |
+
.message .user-message {
|
| 760 |
+
display: block;
|
| 761 |
+
padding: 0 !important;
|
| 762 |
+
white-space: pre-wrap;
|
| 763 |
+
}
|
| 764 |
+
|
| 765 |
+
.message .md-message p {
|
| 766 |
+
margin-top: 0.6em !important;
|
| 767 |
+
margin-bottom: 0.6em !important;
|
| 768 |
+
}
|
| 769 |
+
|
| 770 |
+
.message .md-message p:first-child {
|
| 771 |
+
margin-top: 0 !important;
|
| 772 |
+
}
|
| 773 |
+
|
| 774 |
+
.message .md-message p:last-of-type {
|
| 775 |
+
margin-bottom: 0 !important;
|
| 776 |
+
}
|
| 777 |
+
|
| 778 |
+
.message .md-message {
|
| 779 |
+
display: block;
|
| 780 |
+
padding: 0 !important;
|
| 781 |
+
}
|
| 782 |
+
|
| 783 |
+
.message .raw-message p {
|
| 784 |
+
margin: 0 !important;
|
| 785 |
+
}
|
| 786 |
+
|
| 787 |
+
.message .raw-message {
|
| 788 |
+
display: block;
|
| 789 |
+
padding: 0 !important;
|
| 790 |
+
white-space: pre-wrap;
|
| 791 |
+
}
|
| 792 |
+
|
| 793 |
+
.raw-message.hideM, .md-message.hideM {
|
| 794 |
+
display: none;
|
| 795 |
+
}
|
| 796 |
+
|
| 797 |
+
/* custom buttons */
|
| 798 |
+
.chuanhu-btn {
|
| 799 |
+
border-radius: 5px;
|
| 800 |
+
color: rgba(120, 120, 120, 0.64) !important;
|
| 801 |
+
padding: 4px !important;
|
| 802 |
+
position: absolute;
|
| 803 |
+
right: -22px;
|
| 804 |
+
cursor: pointer !important;
|
| 805 |
+
transition: color .2s ease, background-color .2s ease;
|
| 806 |
+
}
|
| 807 |
+
|
| 808 |
+
.chuanhu-btn:hover {
|
| 809 |
+
background-color: rgba(167, 167, 167, 0.25) !important;
|
| 810 |
+
color: unset !important;
|
| 811 |
+
}
|
| 812 |
+
|
| 813 |
+
.chuanhu-btn:active {
|
| 814 |
+
background-color: rgba(167, 167, 167, 0.5) !important;
|
| 815 |
+
}
|
| 816 |
+
|
| 817 |
+
.chuanhu-btn:focus {
|
| 818 |
+
outline: none;
|
| 819 |
+
}
|
| 820 |
+
|
| 821 |
+
.copy-bot-btn {
|
| 822 |
+
/* top: 18px; */
|
| 823 |
+
bottom: 0;
|
| 824 |
+
}
|
| 825 |
+
|
| 826 |
+
.toggle-md-btn {
|
| 827 |
+
/* top: 0; */
|
| 828 |
+
bottom: 20px;
|
| 829 |
+
}
|
| 830 |
+
|
| 831 |
+
.copy-code-btn {
|
| 832 |
+
position: relative;
|
| 833 |
+
float: right;
|
| 834 |
+
font-size: 1em;
|
| 835 |
+
cursor: pointer;
|
| 836 |
+
}
|
| 837 |
+
|
| 838 |
+
.message-wrap > div img {
|
| 839 |
+
border-radius: 10px !important;
|
| 840 |
+
}
|
| 841 |
+
|
| 842 |
+
/* history message */
|
| 843 |
+
.wrapper > .wrap > .history-message {
|
| 844 |
+
padding: 10px !important;
|
| 845 |
+
}
|
| 846 |
+
|
| 847 |
+
.history-message {
|
| 848 |
+
/* padding: 0 !important; */
|
| 849 |
+
opacity: 80%;
|
| 850 |
+
display: flex;
|
| 851 |
+
flex-direction: column;
|
| 852 |
+
}
|
| 853 |
+
|
| 854 |
+
.history-message > .history-message {
|
| 855 |
+
padding: 0 !important;
|
| 856 |
+
}
|
| 857 |
+
|
| 858 |
+
.history-message > .message-wrap {
|
| 859 |
+
padding: 0 !important;
|
| 860 |
+
margin-bottom: 16px;
|
| 861 |
+
}
|
| 862 |
+
|
| 863 |
+
.history-message > .message {
|
| 864 |
+
margin-bottom: 16px;
|
| 865 |
+
}
|
| 866 |
+
|
| 867 |
+
.wrapper > .wrap > .history-message::after {
|
| 868 |
+
content: "";
|
| 869 |
+
display: block;
|
| 870 |
+
height: 2px;
|
| 871 |
+
background-color: var(--body-text-color-subdued);
|
| 872 |
+
margin-bottom: 10px;
|
| 873 |
+
margin-top: -10px;
|
| 874 |
+
clear: both;
|
| 875 |
+
}
|
| 876 |
+
|
| 877 |
+
.wrapper > .wrap > .history-message > :last-child::after {
|
| 878 |
+
content: "仅供查看";
|
| 879 |
+
display: block;
|
| 880 |
+
text-align: center;
|
| 881 |
+
color: var(--body-text-color-subdued);
|
| 882 |
+
font-size: 0.8em;
|
| 883 |
+
}
|
| 884 |
+
|
| 885 |
+
/* 表格 */
|
| 886 |
+
table {
|
| 887 |
+
margin: 1em 0;
|
| 888 |
+
border-collapse: collapse;
|
| 889 |
+
empty-cells: show;
|
| 890 |
+
}
|
| 891 |
+
|
| 892 |
+
td, th {
|
| 893 |
+
border: 1.2px solid var(--border-color-primary) !important;
|
| 894 |
+
padding: 0.2em;
|
| 895 |
+
}
|
| 896 |
+
|
| 897 |
+
thead {
|
| 898 |
+
background-color: rgba(175, 184, 193, 0.2);
|
| 899 |
+
}
|
| 900 |
+
|
| 901 |
+
thead th {
|
| 902 |
+
padding: .5em .2em;
|
| 903 |
+
}
|
| 904 |
+
|
| 905 |
+
.message :not(pre) code {
|
| 906 |
+
display: inline;
|
| 907 |
+
white-space: break-spaces;
|
| 908 |
+
font-family: var(--font-mono);
|
| 909 |
+
border-radius: 6px;
|
| 910 |
+
margin: 0 2px 0 2px;
|
| 911 |
+
padding: .2em .4em .1em .4em;
|
| 912 |
+
background-color: rgba(175, 184, 193, 0.2);
|
| 913 |
+
}
|
| 914 |
+
|
| 915 |
+
/* 代码块 */
|
| 916 |
+
.message pre,
|
| 917 |
+
.message pre[class*=language-] {
|
| 918 |
+
color: #fff;
|
| 919 |
+
overflow-x: auto;
|
| 920 |
+
overflow-y: hidden;
|
| 921 |
+
margin: .8em 1em 1em 0em !important;
|
| 922 |
+
padding: var(--spacing-xl) 1.2em !important;
|
| 923 |
+
border-radius: var(--radius-lg) !important;
|
| 924 |
+
}
|
| 925 |
+
|
| 926 |
+
.message pre code,
|
| 927 |
+
.message pre code[class*=language-] {
|
| 928 |
+
color: #fff;
|
| 929 |
+
padding: 0;
|
| 930 |
+
margin: 0;
|
| 931 |
+
background-color: unset;
|
| 932 |
+
text-shadow: none;
|
| 933 |
+
font-family: var(--font-mono);
|
| 934 |
+
}
|
| 935 |
+
|
| 936 |
+
/* 覆盖 gradio 丑陋的复制按钮样式 */
|
| 937 |
+
pre button[title="copy"] {
|
| 938 |
+
border-radius: 5px;
|
| 939 |
+
transition: background-color .2s ease;
|
| 940 |
+
}
|
| 941 |
+
|
| 942 |
+
pre button[title="copy"]:hover {
|
| 943 |
+
background-color: #333232;
|
| 944 |
+
}
|
| 945 |
+
|
| 946 |
+
pre button .check {
|
| 947 |
+
color: #fff !important;
|
| 948 |
+
background: var(--neutral-950) !important;
|
| 949 |
+
}
|
| 950 |
+
|
| 951 |
+
/* 覆盖prism.css */
|
| 952 |
+
.language-css .token.string,
|
| 953 |
+
.style .token.string,
|
| 954 |
+
.token.entity,
|
| 955 |
+
.token.operator,
|
| 956 |
+
.token.url {
|
| 957 |
+
background: none !important;
|
| 958 |
+
}
|
| 959 |
+
|
| 960 |
+
.label.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
|
| 961 |
+
display: none;
|
| 962 |
+
}
|
| 963 |
+
|
| 964 |
+
.gallery.svelte-13hsdno.svelte-13hsdno.svelte-13hsdno {
|
| 965 |
+
justify-content: flex-end;
|
| 966 |
+
}
|
| 967 |
+
|
| 968 |
+
.button-group {
|
| 969 |
+
width: 200px !important;
|
| 970 |
+
display: flex !important;
|
| 971 |
+
justify-content: space-between !important;
|
| 972 |
+
}
|
| 973 |
+
|
| 974 |
+
.chatbot {
|
| 975 |
+
background: none !important;
|
| 976 |
+
border: none !important;
|
| 977 |
+
}
|
| 978 |
+
|
| 979 |
+
button[class^="svelte-"], button[class*="svelte-"].selected {
|
| 980 |
+
width: calc(var(--size-full) / 3) !important; /* number of elements */
|
| 981 |
+
background: none !important;
|
| 982 |
+
}
|
| 983 |
+
|
| 984 |
+
/*#component-2 {*/
|
| 985 |
+
/* gap: 0 !important;*/
|
| 986 |
+
/* margin-bottom: 3px !important;*/
|
| 987 |
+
/*}*/
|
| 988 |
+
|
| 989 |
+
.token, .token > span {
|
| 990 |
+
text-overflow: ellipsis;
|
| 991 |
+
overflow: hidden;
|
| 992 |
+
}
|
| 993 |
+
|
| 994 |
+
.audio-btn > div[class^="svelte-"], .audio-btn > audio {
|
| 995 |
+
display: none;
|
| 996 |
+
}
|
| 997 |
+
|
| 998 |
+
div.logo {
|
| 999 |
+
background: url('https://i.ibb.co/BnmxGhz/logo.png') no-repeat left center;
|
| 1000 |
+
background-size: contain;
|
| 1001 |
+
height: 40px;
|
| 1002 |
+
align-items: flex-start;
|
| 1003 |
+
justify-content: flex-start;
|
| 1004 |
+
}
|
| 1005 |
+
|
| 1006 |
+
#component-29 {
|
| 1007 |
+
font-size: var(--message-font-size) !important;
|
| 1008 |
+
box-shadow: var(--cib-shadow-card) !important;
|
| 1009 |
+
outline: transparent solid 1px !important;
|
| 1010 |
+
}
|
| 1011 |
+
|
| 1012 |
+
#component-26 > div.gallery.svelte-13hsdno > button > div {
|
| 1013 |
+
background: var(--chatbot-background-color-light) !important;
|
| 1014 |
+
}
|
| 1015 |
+
|
| 1016 |
+
.dark #component-26 > div.gallery.svelte-13hsdno > button > div {
|
| 1017 |
+
background: var(--chatbot-background-color-dark) !important;
|
| 1018 |
+
}
|
| 1019 |
+
|
| 1020 |
+
.message.pending {
|
| 1021 |
+
background: none !important;
|
| 1022 |
+
}
|
| 1023 |
+
|
| 1024 |
+
#component-36 > label {
|
| 1025 |
+
width: inherit;
|
| 1026 |
+
}
|
custom_vectordb.py
ADDED
|
@@ -0,0 +1,421 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
"""Wrapper around Pinecone vector database."""
|
| 2 |
+
from __future__ import annotations
|
| 3 |
+
|
| 4 |
+
import logging
|
| 5 |
+
import uuid
|
| 6 |
+
from typing import Any, Callable, Iterable, List, Optional, Tuple
|
| 7 |
+
|
| 8 |
+
import numpy as np
|
| 9 |
+
|
| 10 |
+
from langchain.docstore.document import Document
|
| 11 |
+
from langchain.embeddings.base import Embeddings
|
| 12 |
+
from langchain.vectorstores.base import VectorStore
|
| 13 |
+
from langchain.vectorstores.utils import DistanceStrategy, maximal_marginal_relevance
|
| 14 |
+
|
| 15 |
+
logger = logging.getLogger(__name__)
|
| 16 |
+
|
| 17 |
+
|
| 18 |
+
class Pinecone(VectorStore):
|
| 19 |
+
"""Wrapper around Pinecone vector database.
|
| 20 |
+
|
| 21 |
+
To use, you should have the ``pinecone-client`` python package installed.
|
| 22 |
+
|
| 23 |
+
Example:
|
| 24 |
+
.. code-block:: python
|
| 25 |
+
|
| 26 |
+
from langchain.vectorstores import Pinecone
|
| 27 |
+
from langchain.embeddings.openai import OpenAIEmbeddings
|
| 28 |
+
import pinecone
|
| 29 |
+
|
| 30 |
+
# The environment should be the one specified next to the API key
|
| 31 |
+
# in your Pinecone console
|
| 32 |
+
pinecone.init(api_key="***", environment="...")
|
| 33 |
+
index = pinecone.Index("langchain-demo")
|
| 34 |
+
embeddings = OpenAIEmbeddings()
|
| 35 |
+
vectorstore = Pinecone(index, embeddings.embed_query, "text")
|
| 36 |
+
"""
|
| 37 |
+
|
| 38 |
+
def __init__(
    self,
    index: Any,
    embedding_function: Callable,
    text_key: str,
    namespace: Optional[str] = None,
    distance_strategy: Optional[DistanceStrategy] = DistanceStrategy.COSINE,
):
    """Initialize the wrapper around an existing Pinecone index.

    Args:
        index: A ``pinecone.index.Index`` instance to query and upsert into.
        embedding_function: Callable mapping a text string to its embedding.
        text_key: Metadata key under which the raw document text is stored.
        namespace: Default Pinecone namespace used by all operations.
        distance_strategy: Metric assumed when picking a relevance-score
            function; cosine by default.
    """
    # Verify the client is importable before doing anything else so the
    # caller gets an actionable install hint instead of a NameError later.
    try:
        import pinecone
    except ImportError:
        raise ValueError(
            "Could not import pinecone python package. "
            "Please install it with `pip install pinecone-client`."
        )
    if not isinstance(index, pinecone.index.Index):
        raise ValueError(
            f"client should be an instance of pinecone.index.Index, "
            f"got {type(index)}"
        )
    self.distance_strategy = distance_strategy
    self._namespace = namespace
    self._text_key = text_key
    self._embedding_function = embedding_function
    self._index = index
|
| 64 |
+
|
| 65 |
+
@property
def embeddings(self) -> Optional[Embeddings]:
    """Embeddings object backing this store.

    Always returns ``None``: this wrapper is constructed with a bare
    embedding *function* (``embedding_function``) rather than a full
    ``Embeddings`` instance, so there is no object to expose.
    """
    # TODO: Accept this object directly
    return None
|
| 69 |
+
|
| 70 |
+
def add_texts(
    self,
    texts: Iterable[str],
    metadatas: Optional[List[dict]] = None,
    ids: Optional[List[str]] = None,
    namespace: Optional[str] = None,
    batch_size: int = 32,
    **kwargs: Any,
) -> List[str]:
    """Run more texts through the embeddings and add to the vectorstore.

    Args:
        texts: Iterable of strings to add to the vectorstore.
        metadatas: Optional list of metadatas associated with the texts.
        ids: Optional list of ids to associate with the texts.
        namespace: Optional pinecone namespace to add the texts to.
        batch_size: Number of vectors sent per Pinecone upsert request.

    Returns:
        List of ids from adding the texts into the vectorstore.
    """
    if namespace is None:
        namespace = self._namespace
    # Materialize `texts` once: the original iterated it twice (id
    # generation, then embedding), so a one-shot iterator was exhausted
    # by the first pass and nothing was ever upserted.
    texts = list(texts)
    ids = ids or [str(uuid.uuid4()) for _ in texts]
    # Embed each text and pair it with its id and metadata; the raw text
    # is stored back into metadata under `self._text_key` for retrieval.
    docs = []
    for i, text in enumerate(texts):
        embedding = self._embedding_function(text)
        metadata = metadatas[i] if metadatas else {}
        metadata[self._text_key] = text
        docs.append((ids[i], embedding, metadata))
    # upsert to Pinecone
    self._index.upsert(
        vectors=docs, namespace=namespace, batch_size=batch_size, **kwargs
    )
    return ids
|
| 106 |
+
|
| 107 |
+
def similarity_search_with_relevance_scores(
    self,
    query: str,
    k: int = 4,
    **kwargs: Any,
) -> List[Tuple[Document, float]]:
    """Return documents and relevance scores for a query.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        **kwargs: May include ``score_threshold``; when supplied, only
            results scoring strictly above it are kept.

    Returns:
        List of (Document, score) tuples.
    """
    # The original indexed kwargs["score_threshold"] unconditionally and
    # raised KeyError whenever no threshold was passed; treat it as
    # optional so plain calls work too.
    score_threshold = kwargs.get("score_threshold")
    docs_and_scores = self.similarity_search_with_score(query, k=k)
    if score_threshold is None:
        return docs_and_scores
    return [pair for pair in docs_and_scores if pair[1] > score_threshold]
|
| 118 |
+
|
| 119 |
+
def similarity_search_with_score(
    self,
    query: str,
    k: int = 4,
    filter: Optional[dict] = None,
    namespace: Optional[str] = None,
) -> List[Tuple[Document, float]]:
    """Return pinecone documents most similar to query, along with scores.

    Args:
        query: Text to look up documents similar to.
        k: Number of Documents to return. Defaults to 4.
        filter: Dictionary of argument(s) to filter on metadata
        namespace: Namespace to search in. Default will search in '' namespace.

    Returns:
        List of Documents most similar to the query and score for each
    """
    namespace = self._namespace if namespace is None else namespace
    query_embedding = self._embedding_function(query)
    response = self._index.query(
        [query_embedding],
        top_k=k,
        include_metadata=True,
        namespace=namespace,
        filter=filter,
    )
    docs: List[Tuple[Document, float]] = []
    for match in response["matches"]:
        metadata = match["metadata"]
        # Skip vectors whose metadata lacks the stored text — there is
        # no page content to build a Document from.
        if self._text_key not in metadata:
            logger.warning(
                f"Found document with no `{self._text_key}` key. Skipping."
            )
            continue
        text = metadata.pop(self._text_key)
        docs.append((Document(page_content=text, metadata=metadata), match["score"]))
    return docs
|
| 159 |
+
|
| 160 |
+
def similarity_search(
|
| 161 |
+
self,
|
| 162 |
+
query: str,
|
| 163 |
+
k: int = 4,
|
| 164 |
+
filter: Optional[dict] = None,
|
| 165 |
+
namespace: Optional[str] = None,
|
| 166 |
+
**kwargs: Any,
|
| 167 |
+
) -> List[Document]:
|
| 168 |
+
"""Return pinecone documents most similar to query.
|
| 169 |
+
|
| 170 |
+
Args:
|
| 171 |
+
query: Text to look up documents similar to.
|
| 172 |
+
k: Number of Documents to return. Defaults to 4.
|
| 173 |
+
filter: Dictionary of argument(s) to filter on metadata
|
| 174 |
+
namespace: Namespace to search in. Default will search in '' namespace.
|
| 175 |
+
|
| 176 |
+
Returns:
|
| 177 |
+
List of Documents most similar to the query and score for each
|
| 178 |
+
"""
|
| 179 |
+
docs_and_scores = self.similarity_search_with_score(
|
| 180 |
+
query, k=k, filter=filter, namespace=namespace, **kwargs
|
| 181 |
+
)
|
| 182 |
+
return [doc for doc, _ in docs_and_scores]
|
| 183 |
+
|
| 184 |
+
def _select_relevance_score_fn(self) -> Callable[[float], float]:
|
| 185 |
+
"""
|
| 186 |
+
The 'correct' relevance function
|
| 187 |
+
may differ depending on a few things, including:
|
| 188 |
+
- the distance / similarity metric used by the VectorStore
|
| 189 |
+
- the scale of your embeddings (OpenAI's are unit normed. Many others are not!)
|
| 190 |
+
- embedding dimensionality
|
| 191 |
+
- etc.
|
| 192 |
+
"""
|
| 193 |
+
|
| 194 |
+
if self.distance_strategy == DistanceStrategy.COSINE:
|
| 195 |
+
return self._cosine_relevance_score_fn
|
| 196 |
+
elif self.distance_strategy == DistanceStrategy.MAX_INNER_PRODUCT:
|
| 197 |
+
return self._max_inner_product_relevance_score_fn
|
| 198 |
+
elif self.distance_strategy == DistanceStrategy.EUCLIDEAN_DISTANCE:
|
| 199 |
+
return self._euclidean_relevance_score_fn
|
| 200 |
+
else:
|
| 201 |
+
raise ValueError(
|
| 202 |
+
"Unknown distance strategy, must be cosine, max_inner_product "
|
| 203 |
+
"(dot product), or euclidean"
|
| 204 |
+
)
|
| 205 |
+
|
| 206 |
+
def max_marginal_relevance_search_by_vector(
|
| 207 |
+
self,
|
| 208 |
+
embedding: List[float],
|
| 209 |
+
k: int = 4,
|
| 210 |
+
fetch_k: int = 20,
|
| 211 |
+
lambda_mult: float = 0.5,
|
| 212 |
+
filter: Optional[dict] = None,
|
| 213 |
+
namespace: Optional[str] = None,
|
| 214 |
+
**kwargs: Any,
|
| 215 |
+
) -> List[Document]:
|
| 216 |
+
"""Return docs selected using the maximal marginal relevance.
|
| 217 |
+
|
| 218 |
+
Maximal marginal relevance optimizes for similarity to query AND diversity
|
| 219 |
+
among selected documents.
|
| 220 |
+
|
| 221 |
+
Args:
|
| 222 |
+
embedding: Embedding to look up documents similar to.
|
| 223 |
+
k: Number of Documents to return. Defaults to 4.
|
| 224 |
+
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
|
| 225 |
+
lambda_mult: Number between 0 and 1 that determines the degree
|
| 226 |
+
of diversity among the results with 0 corresponding
|
| 227 |
+
to maximum diversity and 1 to minimum diversity.
|
| 228 |
+
Defaults to 0.5.
|
| 229 |
+
Returns:
|
| 230 |
+
List of Documents selected by maximal marginal relevance.
|
| 231 |
+
"""
|
| 232 |
+
if namespace is None:
|
| 233 |
+
namespace = self._namespace
|
| 234 |
+
results = self._index.query(
|
| 235 |
+
[embedding],
|
| 236 |
+
top_k=fetch_k,
|
| 237 |
+
include_values=True,
|
| 238 |
+
include_metadata=True,
|
| 239 |
+
namespace=namespace,
|
| 240 |
+
filter=filter,
|
| 241 |
+
)
|
| 242 |
+
mmr_selected = maximal_marginal_relevance(
|
| 243 |
+
np.array([embedding], dtype=np.float32),
|
| 244 |
+
[item["values"] for item in results["matches"]],
|
| 245 |
+
k=k,
|
| 246 |
+
lambda_mult=lambda_mult,
|
| 247 |
+
)
|
| 248 |
+
selected = [results["matches"][i]["metadata"] for i in mmr_selected]
|
| 249 |
+
return [
|
| 250 |
+
Document(page_content=metadata.pop((self._text_key)), metadata=metadata)
|
| 251 |
+
for metadata in selected
|
| 252 |
+
]
|
| 253 |
+
|
| 254 |
+
def max_marginal_relevance_search(
|
| 255 |
+
self,
|
| 256 |
+
query: str,
|
| 257 |
+
k: int = 4,
|
| 258 |
+
fetch_k: int = 20,
|
| 259 |
+
lambda_mult: float = 0.5,
|
| 260 |
+
filter: Optional[dict] = None,
|
| 261 |
+
namespace: Optional[str] = None,
|
| 262 |
+
**kwargs: Any,
|
| 263 |
+
) -> List[Document]:
|
| 264 |
+
"""Return docs selected using the maximal marginal relevance.
|
| 265 |
+
|
| 266 |
+
Maximal marginal relevance optimizes for similarity to query AND diversity
|
| 267 |
+
among selected documents.
|
| 268 |
+
|
| 269 |
+
Args:
|
| 270 |
+
query: Text to look up documents similar to.
|
| 271 |
+
k: Number of Documents to return. Defaults to 4.
|
| 272 |
+
fetch_k: Number of Documents to fetch to pass to MMR algorithm.
|
| 273 |
+
lambda_mult: Number between 0 and 1 that determines the degree
|
| 274 |
+
of diversity among the results with 0 corresponding
|
| 275 |
+
to maximum diversity and 1 to minimum diversity.
|
| 276 |
+
Defaults to 0.5.
|
| 277 |
+
Returns:
|
| 278 |
+
List of Documents selected by maximal marginal relevance.
|
| 279 |
+
"""
|
| 280 |
+
embedding = self._embedding_function(query)
|
| 281 |
+
return self.max_marginal_relevance_search_by_vector(
|
| 282 |
+
embedding, k, fetch_k, lambda_mult, filter, namespace
|
| 283 |
+
)
|
| 284 |
+
|
| 285 |
+
@classmethod
|
| 286 |
+
def from_texts(
|
| 287 |
+
cls,
|
| 288 |
+
texts: List[str],
|
| 289 |
+
embedding: Embeddings,
|
| 290 |
+
metadatas: Optional[List[dict]] = None,
|
| 291 |
+
ids: Optional[List[str]] = None,
|
| 292 |
+
batch_size: int = 32,
|
| 293 |
+
text_key: str = "text",
|
| 294 |
+
index_name: Optional[str] = None,
|
| 295 |
+
namespace: Optional[str] = None,
|
| 296 |
+
upsert_kwargs: Optional[dict] = None,
|
| 297 |
+
**kwargs: Any,
|
| 298 |
+
) -> Pinecone:
|
| 299 |
+
"""Construct Pinecone wrapper from raw documents.
|
| 300 |
+
|
| 301 |
+
This is a user friendly interface that:
|
| 302 |
+
1. Embeds documents.
|
| 303 |
+
2. Adds the documents to a provided Pinecone index
|
| 304 |
+
|
| 305 |
+
This is intended to be a quick way to get started.
|
| 306 |
+
|
| 307 |
+
Example:
|
| 308 |
+
.. code-block:: python
|
| 309 |
+
|
| 310 |
+
from langchain import Pinecone
|
| 311 |
+
from langchain.embeddings import OpenAIEmbeddings
|
| 312 |
+
import pinecone
|
| 313 |
+
|
| 314 |
+
# The environment should be the one specified next to the API key
|
| 315 |
+
# in your Pinecone console
|
| 316 |
+
pinecone.init(api_key="***", environment="...")
|
| 317 |
+
embeddings = OpenAIEmbeddings()
|
| 318 |
+
pinecone = Pinecone.from_texts(
|
| 319 |
+
texts,
|
| 320 |
+
embeddings,
|
| 321 |
+
index_name="langchain-demo"
|
| 322 |
+
)
|
| 323 |
+
"""
|
| 324 |
+
try:
|
| 325 |
+
import pinecone
|
| 326 |
+
except ImportError:
|
| 327 |
+
raise ValueError(
|
| 328 |
+
"Could not import pinecone python package. "
|
| 329 |
+
"Please install it with `pip install pinecone-client`."
|
| 330 |
+
)
|
| 331 |
+
|
| 332 |
+
indexes = pinecone.list_indexes() # checks if provided index exists
|
| 333 |
+
|
| 334 |
+
if index_name in indexes:
|
| 335 |
+
index = pinecone.Index(index_name)
|
| 336 |
+
elif len(indexes) == 0:
|
| 337 |
+
raise ValueError(
|
| 338 |
+
"No active indexes found in your Pinecone project, "
|
| 339 |
+
"are you sure you're using the right API key and environment?"
|
| 340 |
+
)
|
| 341 |
+
else:
|
| 342 |
+
raise ValueError(
|
| 343 |
+
f"Index '{index_name}' not found in your Pinecone project. "
|
| 344 |
+
f"Did you mean one of the following indexes: {', '.join(indexes)}"
|
| 345 |
+
)
|
| 346 |
+
for i in range(0, len(texts), batch_size):
|
| 347 |
+
# set end position of batch
|
| 348 |
+
i_end = min(i + batch_size, len(texts))
|
| 349 |
+
# get batch of texts and ids
|
| 350 |
+
lines_batch = texts[i:i_end]
|
| 351 |
+
# create ids if not provided
|
| 352 |
+
if ids:
|
| 353 |
+
ids_batch = ids[i:i_end]
|
| 354 |
+
else:
|
| 355 |
+
ids_batch = [str(uuid.uuid4()) for n in range(i, i_end)]
|
| 356 |
+
# create embeddings
|
| 357 |
+
embeds = embedding.embed_documents(lines_batch)
|
| 358 |
+
# prep metadata and upsert batch
|
| 359 |
+
if metadatas:
|
| 360 |
+
metadata = metadatas[i:i_end]
|
| 361 |
+
else:
|
| 362 |
+
metadata = [{} for _ in range(i, i_end)]
|
| 363 |
+
for j, line in enumerate(lines_batch):
|
| 364 |
+
metadata[j][text_key] = line
|
| 365 |
+
to_upsert = zip(ids_batch, embeds, metadata)
|
| 366 |
+
# upsert to Pinecone
|
| 367 |
+
_upsert_kwargs = upsert_kwargs or {}
|
| 368 |
+
index.upsert(vectors=list(to_upsert), namespace=namespace, **_upsert_kwargs)
|
| 369 |
+
return cls(index, embedding.embed_query, text_key, namespace, **kwargs)
|
| 370 |
+
|
| 371 |
+
@classmethod
|
| 372 |
+
def from_existing_index(
|
| 373 |
+
cls,
|
| 374 |
+
index_name: str,
|
| 375 |
+
embedding: Embeddings,
|
| 376 |
+
text_key: str = "text",
|
| 377 |
+
namespace: Optional[str] = None,
|
| 378 |
+
) -> Pinecone:
|
| 379 |
+
"""Load pinecone vectorstore from index name."""
|
| 380 |
+
try:
|
| 381 |
+
import pinecone
|
| 382 |
+
except ImportError:
|
| 383 |
+
raise ValueError(
|
| 384 |
+
"Could not import pinecone python package. "
|
| 385 |
+
"Please install it with `pip install pinecone-client`."
|
| 386 |
+
)
|
| 387 |
+
|
| 388 |
+
return cls(
|
| 389 |
+
pinecone.Index(index_name), embedding.embed_query, text_key, namespace
|
| 390 |
+
)
|
| 391 |
+
|
| 392 |
+
def delete(
|
| 393 |
+
self,
|
| 394 |
+
ids: Optional[List[str]] = None,
|
| 395 |
+
delete_all: Optional[bool] = None,
|
| 396 |
+
namespace: Optional[str] = None,
|
| 397 |
+
filter: Optional[dict] = None,
|
| 398 |
+
**kwargs: Any,
|
| 399 |
+
) -> None:
|
| 400 |
+
"""Delete by vector IDs or filter.
|
| 401 |
+
Args:
|
| 402 |
+
ids: List of ids to delete.
|
| 403 |
+
filter: Dictionary of conditions to filter vectors to delete.
|
| 404 |
+
"""
|
| 405 |
+
|
| 406 |
+
if namespace is None:
|
| 407 |
+
namespace = self._namespace
|
| 408 |
+
|
| 409 |
+
if delete_all:
|
| 410 |
+
self._index.delete(delete_all=True, namespace=namespace, **kwargs)
|
| 411 |
+
elif ids is not None:
|
| 412 |
+
chunk_size = 1000
|
| 413 |
+
for i in range(0, len(ids), chunk_size):
|
| 414 |
+
chunk = ids[i : i + chunk_size]
|
| 415 |
+
self._index.delete(ids=chunk, namespace=namespace, **kwargs)
|
| 416 |
+
elif filter is not None:
|
| 417 |
+
self._index.delete(filter=filter, namespace=namespace, **kwargs)
|
| 418 |
+
else:
|
| 419 |
+
raise ValueError("Either ids, delete_all, or filter must be provided.")
|
| 420 |
+
|
| 421 |
+
return None
|
data.json
CHANGED
|
The diff for this file is too large to render.
See raw diff
|
|
|
geckodriver.log
DELETED
|
The diff for this file is too large to render.
See raw diff
|
|
|
history/binh/2023-08-06_17-10-17/Assistance Inquiry.json
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
{"history": [["Hello bot", "Hello! How can I assist you today?"]], "chatbot": [["Hello bot", "Hello! How can I assist you today?"]]}
|
html_parser.py
DELETED
|
@@ -1,116 +0,0 @@
|
|
| 1 |
-
"""HTML parser.
|
| 2 |
-
|
| 3 |
-
Contains parser for html files.
|
| 4 |
-
|
| 5 |
-
"""
|
| 6 |
-
import re
|
| 7 |
-
from pathlib import Path
|
| 8 |
-
from typing import Dict, Union
|
| 9 |
-
from abc import abstractmethod
|
| 10 |
-
from pathlib import Path
|
| 11 |
-
from typing import Dict, List, Optional, Union
|
| 12 |
-
|
| 13 |
-
|
| 14 |
-
class BaseParser:
|
| 15 |
-
"""Base class for all parsers."""
|
| 16 |
-
|
| 17 |
-
def __init__(self, parser_config: Optional[Dict] = None):
|
| 18 |
-
"""Init params."""
|
| 19 |
-
self._parser_config = parser_config
|
| 20 |
-
|
| 21 |
-
def init_parser(self) -> None:
|
| 22 |
-
"""Init parser and store it."""
|
| 23 |
-
parser_config = self._init_parser()
|
| 24 |
-
self._parser_config = parser_config
|
| 25 |
-
|
| 26 |
-
@property
|
| 27 |
-
def parser_config_set(self) -> bool:
|
| 28 |
-
"""Check if parser config is set."""
|
| 29 |
-
return self._parser_config is not None
|
| 30 |
-
|
| 31 |
-
@property
|
| 32 |
-
def parser_config(self) -> Dict:
|
| 33 |
-
"""Check if parser config is set."""
|
| 34 |
-
if self._parser_config is None:
|
| 35 |
-
raise ValueError("Parser config not set.")
|
| 36 |
-
return self._parser_config
|
| 37 |
-
|
| 38 |
-
@abstractmethod
|
| 39 |
-
def _init_parser(self) -> Dict:
|
| 40 |
-
"""Initialize the parser with the config."""
|
| 41 |
-
|
| 42 |
-
@abstractmethod
|
| 43 |
-
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, List[str]]:
|
| 44 |
-
"""Parse file."""
|
| 45 |
-
|
| 46 |
-
class HTMLParser(BaseParser):
|
| 47 |
-
"""HTML parser."""
|
| 48 |
-
|
| 49 |
-
def _init_parser(self) -> Dict:
|
| 50 |
-
"""Init parser."""
|
| 51 |
-
return {}
|
| 52 |
-
|
| 53 |
-
def parse_file(self, file: Path, errors: str = "ignore") -> Union[str, list[str]]:
|
| 54 |
-
"""Parse file.
|
| 55 |
-
|
| 56 |
-
Returns:
|
| 57 |
-
Union[str, List[str]]: a string or a List of strings.
|
| 58 |
-
"""
|
| 59 |
-
try:
|
| 60 |
-
from unstructured.partition.html import partition_html
|
| 61 |
-
from unstructured.staging.base import convert_to_isd
|
| 62 |
-
from unstructured.cleaners.core import clean
|
| 63 |
-
except ImportError:
|
| 64 |
-
raise ValueError("unstructured package is required to parse HTML files.")
|
| 65 |
-
|
| 66 |
-
# Using the unstructured library to convert the html to isd format
|
| 67 |
-
# isd sample : isd = [
|
| 68 |
-
# {"text": "My Title", "type": "Title"},
|
| 69 |
-
# {"text": "My Narrative", "type": "NarrativeText"}
|
| 70 |
-
# ]
|
| 71 |
-
with open(file, "r", encoding="utf-8") as fp:
|
| 72 |
-
elements = partition_html(file=fp)
|
| 73 |
-
isd = convert_to_isd(elements)
|
| 74 |
-
|
| 75 |
-
# Removing non ascii charactwers from isd_el['text']
|
| 76 |
-
for isd_el in isd:
|
| 77 |
-
isd_el['text'] = isd_el['text'].encode("ascii", "ignore").decode()
|
| 78 |
-
|
| 79 |
-
# Removing all the \n characters from isd_el['text'] using regex and replace with single space
|
| 80 |
-
# Removing all the extra spaces from isd_el['text'] using regex and replace with single space
|
| 81 |
-
for isd_el in isd:
|
| 82 |
-
isd_el['text'] = re.sub(r'\n', ' ', isd_el['text'], flags=re.MULTILINE | re.DOTALL)
|
| 83 |
-
isd_el['text'] = re.sub(r"\s{2,}", " ", isd_el['text'], flags=re.MULTILINE | re.DOTALL)
|
| 84 |
-
|
| 85 |
-
# more cleaning: extra_whitespaces, dashes, bullets, trailing_punctuation
|
| 86 |
-
for isd_el in isd:
|
| 87 |
-
clean(isd_el['text'], extra_whitespace=True, dashes=True, bullets=True, trailing_punctuation=True)
|
| 88 |
-
|
| 89 |
-
# Creating a list of all the indexes of isd_el['type'] = 'Title'
|
| 90 |
-
title_indexes = [i for i, isd_el in enumerate(isd) if isd_el['type'] == 'Title']
|
| 91 |
-
|
| 92 |
-
# Creating 'Chunks' - List of lists of strings
|
| 93 |
-
# each list starting with with isd_el['type'] = 'Title' and all the data till the next 'Title'
|
| 94 |
-
# Each Chunk can be thought of as an individual set of data, which can be sent to the model
|
| 95 |
-
# Where Each Title is grouped together with the data under it
|
| 96 |
-
|
| 97 |
-
Chunks = [[]]
|
| 98 |
-
final_chunks = list(list())
|
| 99 |
-
|
| 100 |
-
for i, isd_el in enumerate(isd):
|
| 101 |
-
if i in title_indexes:
|
| 102 |
-
Chunks.append([])
|
| 103 |
-
Chunks[-1].append(isd_el['text'])
|
| 104 |
-
|
| 105 |
-
# Removing all the chunks with sum of lenth of all the strings in the chunk < 25
|
| 106 |
-
# TODO: This value can be an user defined variable
|
| 107 |
-
for chunk in Chunks:
|
| 108 |
-
# sum of lenth of all the strings in the chunk
|
| 109 |
-
sum = 0
|
| 110 |
-
sum += len(str(chunk))
|
| 111 |
-
if sum < 25:
|
| 112 |
-
Chunks.remove(chunk)
|
| 113 |
-
else:
|
| 114 |
-
# appending all the approved chunks to final_chunks as a single string
|
| 115 |
-
final_chunks.append(" ".join([str(item) for item in chunk]))
|
| 116 |
-
return final_chunks
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
logo.png
ADDED
|
process_fb.py
DELETED
|
@@ -1,55 +0,0 @@
|
|
| 1 |
-
import json
|
| 2 |
-
import ast
|
| 3 |
-
import os
|
| 4 |
-
import pinecone
|
| 5 |
-
|
| 6 |
-
from pydantic import Field
|
| 7 |
-
from vector_db import Document
|
| 8 |
-
from html_parser import HTMLParser
|
| 9 |
-
from langchain.vectorstores import Pinecone
|
| 10 |
-
from config import PINECONE_API_KEY, PINECONE_ENVIRONMENT, INDEX_NAME
|
| 11 |
-
from config import EMBEDDING_API_BASE, EMBEDDING_API_KEY, OPENAI_API_TYPE, OPENAI_API_VERSION, EMBEDDING_DEPLOYMENT_ID
|
| 12 |
-
from langchain.embeddings import OpenAIEmbeddings
|
| 13 |
-
|
| 14 |
-
|
| 15 |
-
# initialize pinecone
|
| 16 |
-
pinecone.init(
|
| 17 |
-
api_key=PINECONE_API_KEY, # find at app.pinecone.io
|
| 18 |
-
environment=PINECONE_ENVIRONMENT, # next to api key in console
|
| 19 |
-
)
|
| 20 |
-
|
| 21 |
-
# Azure embedding model definition
|
| 22 |
-
embeddings = OpenAIEmbeddings(
|
| 23 |
-
deployment=EMBEDDING_DEPLOYMENT_ID,
|
| 24 |
-
openai_api_key=EMBEDDING_API_KEY,
|
| 25 |
-
openai_api_base=EMBEDDING_API_BASE,
|
| 26 |
-
openai_api_type=OPENAI_API_TYPE,
|
| 27 |
-
openai_api_version=OPENAI_API_VERSION,
|
| 28 |
-
chunk_size=16
|
| 29 |
-
)
|
| 30 |
-
|
| 31 |
-
if INDEX_NAME and INDEX_NAME not in pinecone.list_indexes():
|
| 32 |
-
pinecone.create_index(
|
| 33 |
-
INDEX_NAME,
|
| 34 |
-
metric="cosine",
|
| 35 |
-
dimension=1536
|
| 36 |
-
)
|
| 37 |
-
print(f"Index {INDEX_NAME} created successfully")
|
| 38 |
-
|
| 39 |
-
index = pinecone.Index(INDEX_NAME)
|
| 40 |
-
|
| 41 |
-
with open('data.json') as json_file:
|
| 42 |
-
data = json.load(json_file)
|
| 43 |
-
datas = ast.literal_eval(data)
|
| 44 |
-
|
| 45 |
-
texts = []
|
| 46 |
-
for k, v in datas.items():
|
| 47 |
-
content = v["content"]
|
| 48 |
-
post_url = v["post_url"]
|
| 49 |
-
texts.append(Document(page_content=content, metadata={"source": post_url}))
|
| 50 |
-
|
| 51 |
-
if len(texts)>0:
|
| 52 |
-
Pinecone.from_documents(texts, embeddings, index_name=INDEX_NAME)
|
| 53 |
-
message = f"Add files to {INDEX_NAME} sucessfully"
|
| 54 |
-
print(message)
|
| 55 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
process_html.py
DELETED
|
@@ -1,58 +0,0 @@
|
|
| 1 |
-
|
| 2 |
-
|
| 3 |
-
import os
|
| 4 |
-
import pinecone
|
| 5 |
-
|
| 6 |
-
from pydantic import Field
|
| 7 |
-
from vector_db import Document
|
| 8 |
-
from html_parser import HTMLParser
|
| 9 |
-
from langchain.vectorstores import Pinecone
|
| 10 |
-
from config import PINECONE_API_KEY, PINECONE_ENVIRONMENT, INDEX_NAME
|
| 11 |
-
from config import EMBEDDING_API_BASE, EMBEDDING_API_KEY, OPENAI_API_TYPE, OPENAI_API_VERSION, EMBEDDING_DEPLOYMENT_ID
|
| 12 |
-
from langchain.embeddings import OpenAIEmbeddings
|
| 13 |
-
|
| 14 |
-
WEBSITE_FOLDER = 'website'
|
| 15 |
-
parser = HTMLParser()
|
| 16 |
-
|
| 17 |
-
# initialize pinecone
|
| 18 |
-
pinecone.init(
|
| 19 |
-
api_key=PINECONE_API_KEY, # find at app.pinecone.io
|
| 20 |
-
environment=PINECONE_ENVIRONMENT, # next to api key in console
|
| 21 |
-
)
|
| 22 |
-
|
| 23 |
-
# Azure embedding model definition
|
| 24 |
-
embeddings = OpenAIEmbeddings(
|
| 25 |
-
deployment=EMBEDDING_DEPLOYMENT_ID,
|
| 26 |
-
openai_api_key=EMBEDDING_API_KEY,
|
| 27 |
-
openai_api_base=EMBEDDING_API_BASE,
|
| 28 |
-
openai_api_type=OPENAI_API_TYPE,
|
| 29 |
-
openai_api_version=OPENAI_API_VERSION,
|
| 30 |
-
chunk_size=16
|
| 31 |
-
)
|
| 32 |
-
|
| 33 |
-
if INDEX_NAME and INDEX_NAME not in pinecone.list_indexes():
|
| 34 |
-
pinecone.create_index(
|
| 35 |
-
INDEX_NAME,
|
| 36 |
-
metric="cosine",
|
| 37 |
-
dimension=1536
|
| 38 |
-
)
|
| 39 |
-
print(f"Index {INDEX_NAME} created successfully")
|
| 40 |
-
|
| 41 |
-
index = pinecone.Index(INDEX_NAME)
|
| 42 |
-
index.delete(delete_all=True)
|
| 43 |
-
|
| 44 |
-
files_src = os.listdir(WEBSITE_FOLDER)
|
| 45 |
-
documents = []
|
| 46 |
-
for file in files_src:
|
| 47 |
-
filepath = os.path.join(WEBSITE_FOLDER, file)
|
| 48 |
-
filename = os.path.basename(filepath)
|
| 49 |
-
data = parser.parse_file(filepath)
|
| 50 |
-
texts= []
|
| 51 |
-
for d in data:
|
| 52 |
-
texts.append(Document(page_content=d, metadata={"source": filepath}))
|
| 53 |
-
documents.extend(texts)
|
| 54 |
-
print(len(documents))
|
| 55 |
-
if len(documents)>0:
|
| 56 |
-
document_id = [d.metadata['document_id'] + f"_{idx}" for (idx, d) in enumerate(documents)]
|
| 57 |
-
Pinecone.from_documents(documents, embeddings, ids=document_id, index_name=INDEX_NAME)
|
| 58 |
-
message = f"Add website to {INDEX_NAME} sucessfully"
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
prompts/__pycache__/condense_llm.cpython-39.pyc
CHANGED
|
Binary files a/prompts/__pycache__/condense_llm.cpython-39.pyc and b/prompts/__pycache__/condense_llm.cpython-39.pyc differ
|
|
|
prompts/__pycache__/create_topic.cpython-39.pyc
ADDED
|
Binary file (603 Bytes). View file
|
|
|
prompts/__pycache__/custom_chain.cpython-39.pyc
CHANGED
|
Binary files a/prompts/__pycache__/custom_chain.cpython-39.pyc and b/prompts/__pycache__/custom_chain.cpython-39.pyc differ
|
|
|
prompts/__pycache__/decision_maker.cpython-39.pyc
ADDED
|
Binary file (794 Bytes). View file
|
|
|
prompts/__pycache__/llm.cpython-39.pyc
CHANGED
|
Binary files a/prompts/__pycache__/llm.cpython-39.pyc and b/prompts/__pycache__/llm.cpython-39.pyc differ
|
|
|
prompts/__pycache__/multi_queries.cpython-39.pyc
DELETED
|
Binary file (336 Bytes)
|
|
|
prompts/__pycache__/related_question.cpython-39.pyc
ADDED
|
Binary file (770 Bytes). View file
|
|
|
prompts/__pycache__/simple_chain.cpython-39.pyc
ADDED
|
Binary file (311 Bytes). View file
|
|
|
prompts/__pycache__/stage_analyzer.cpython-39.pyc
DELETED
|
Binary file (3.6 kB)
|
|
|