Spaces:
Runtime error
Runtime error
Commit ·
a4dfbf9
0
Parent(s):
Duplicate from Rams901/Cicero-interactive-QA
Browse files- .gitattributes +35 -0
- README.md +13 -0
- app.py +176 -0
- db_full/index.faiss +3 -0
- db_full/index.pkl +3 -0
- requirements.txt +8 -0
.gitattributes
ADDED
|
@@ -0,0 +1,35 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
*.7z filter=lfs diff=lfs merge=lfs -text
|
| 2 |
+
*.arrow filter=lfs diff=lfs merge=lfs -text
|
| 3 |
+
*.bin filter=lfs diff=lfs merge=lfs -text
|
| 4 |
+
*.bz2 filter=lfs diff=lfs merge=lfs -text
|
| 5 |
+
*.ckpt filter=lfs diff=lfs merge=lfs -text
|
| 6 |
+
*.ftz filter=lfs diff=lfs merge=lfs -text
|
| 7 |
+
*.gz filter=lfs diff=lfs merge=lfs -text
|
| 8 |
+
*.h5 filter=lfs diff=lfs merge=lfs -text
|
| 9 |
+
*.joblib filter=lfs diff=lfs merge=lfs -text
|
| 10 |
+
*.lfs.* filter=lfs diff=lfs merge=lfs -text
|
| 11 |
+
*.mlmodel filter=lfs diff=lfs merge=lfs -text
|
| 12 |
+
*.model filter=lfs diff=lfs merge=lfs -text
|
| 13 |
+
*.msgpack filter=lfs diff=lfs merge=lfs -text
|
| 14 |
+
*.npy filter=lfs diff=lfs merge=lfs -text
|
| 15 |
+
*.npz filter=lfs diff=lfs merge=lfs -text
|
| 16 |
+
*.onnx filter=lfs diff=lfs merge=lfs -text
|
| 17 |
+
*.ot filter=lfs diff=lfs merge=lfs -text
|
| 18 |
+
*.parquet filter=lfs diff=lfs merge=lfs -text
|
| 19 |
+
*.pb filter=lfs diff=lfs merge=lfs -text
|
| 20 |
+
*.pickle filter=lfs diff=lfs merge=lfs -text
|
| 21 |
+
*.pkl filter=lfs diff=lfs merge=lfs -text
|
| 22 |
+
*.pt filter=lfs diff=lfs merge=lfs -text
|
| 23 |
+
*.pth filter=lfs diff=lfs merge=lfs -text
|
| 24 |
+
*.rar filter=lfs diff=lfs merge=lfs -text
|
| 25 |
+
*.safetensors filter=lfs diff=lfs merge=lfs -text
|
| 26 |
+
saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
| 27 |
+
*.tar.* filter=lfs diff=lfs merge=lfs -text
|
| 28 |
+
*.tflite filter=lfs diff=lfs merge=lfs -text
|
| 29 |
+
*.tgz filter=lfs diff=lfs merge=lfs -text
|
| 30 |
+
*.wasm filter=lfs diff=lfs merge=lfs -text
|
| 31 |
+
*.xz filter=lfs diff=lfs merge=lfs -text
|
| 32 |
+
*.zip filter=lfs diff=lfs merge=lfs -text
|
| 33 |
+
*.zst filter=lfs diff=lfs merge=lfs -text
|
| 34 |
+
*tfevents* filter=lfs diff=lfs merge=lfs -text
|
| 35 |
+
db_full/index.faiss filter=lfs diff=lfs merge=lfs -text
|
README.md
ADDED
|
@@ -0,0 +1,13 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
---
|
| 2 |
+
title: Cicero Interactive QA
|
| 3 |
+
emoji: 🚀
|
| 4 |
+
colorFrom: green
|
| 5 |
+
colorTo: gray
|
| 6 |
+
sdk: gradio
|
| 7 |
+
sdk_version: 3.23.0
|
| 8 |
+
app_file: app.py
|
| 9 |
+
pinned: false
|
| 10 |
+
duplicated_from: Rams901/Cicero-interactive-QA
|
| 11 |
+
---
|
| 12 |
+
|
| 13 |
+
Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
|
app.py
ADDED
|
@@ -0,0 +1,176 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
import gradio as gr
|
| 2 |
+
import numpy as np
|
| 3 |
+
from langchain.text_splitter import RecursiveCharacterTextSplitter
|
| 4 |
+
from langchain.chains import LLMChain
|
| 5 |
+
from langchain import PromptTemplate
|
| 6 |
+
import re
|
| 7 |
+
import pandas as pd
|
| 8 |
+
from langchain.vectorstores import FAISS
|
| 9 |
+
import requests
|
| 10 |
+
from typing import List
|
| 11 |
+
from langchain.schema import (
|
| 12 |
+
SystemMessage,
|
| 13 |
+
HumanMessage,
|
| 14 |
+
AIMessage
|
| 15 |
+
)
|
| 16 |
+
import os
|
| 17 |
+
from langchain.embeddings import HuggingFaceEmbeddings
|
| 18 |
+
from langchain.chat_models import ChatOpenAI
|
| 19 |
+
from langchain.llms.base import LLM
|
| 20 |
+
from typing import Optional, List, Mapping, Any
|
| 21 |
+
|
| 22 |
+
|
| 23 |
+
# --- Global configuration & shared resources -------------------------------

# Maximum number of characters fed to the model in a single request.
CHARACTER_CUT_OFF = 20000

# Splitter used to chunk long documents before embedding/retrieval.
text_splitter = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100)

# Sentence-transformer embeddings plus the pre-built FAISS index shipped
# in db_full/ (index.faiss / index.pkl, stored via git-lfs).
embeddings = HuggingFaceEmbeddings()
db = FAISS.load_local('db_full', embeddings)

# Cache of the most recently retrieved documents (refreshed by qa_retrieve).
mp_docs = {}

# Chat model shared by both the synthesis and follow-up-question chains.
llm = ChatOpenAI(temperature=0, model='gpt-3.5-turbo-16k')
| 36 |
+
def add_text(history, text):
    """Append the user's message to the chat history.

    Returns the updated history — the bot's reply slot is left as ``None``
    until qa_retrieve fills it — and an empty string, which Gradio uses to
    clear the input textbox.
    """
    print(history)  # debug: log the chat state before appending
    return history + [(text, None)], ""
| 42 |
+
|
| 43 |
+
def retrieve_thoughts(query):
    """Retrieve documents relevant to *query* from the global FAISS index.

    Runs a scored similarity search over the entire index and buckets
    results by distance score (lower = more similar):

      * 'tier 1' -- score < 1.0 (strong matches), capped at 5 documents
      * 'tier 2' -- 1.1 < score < 1.2 (weaker matches), capped at 5 documents

    NOTE(review): scores in [1.0, 1.1] fall into neither tier -- confirm
    that gap is intentional.

    Returns a dict mapping 'tier 1' / 'tier 2' to lists of Documents.
    """
    # Search the whole index: k is a generous cap, fetch_k covers every
    # stored vector.
    docs = db.similarity_search_with_score(
        query=query, k=1000, fetch_k=len(db.index_to_docstore_id.values())
    )

    # TO-DO: What if user query doesn't match what we provide as documents
    tier_1 = [doc for doc, score in docs if score < 1][:5]
    # Chained comparison replaces the original boolean-multiplication trick
    # (a > 1.1) * (a < 1.2); slicing already clamps, so min(len, 5) was
    # redundant.
    tier_2 = [doc for doc, score in docs if 1.1 < score < 1.2]

    return {'tier 1': tier_1, 'tier 2': tier_2[:5]}
| 53 |
+
|
| 54 |
+
def qa_retrieve(chatlog):
    """Generate the bot reply for the most recent user turn in *chatlog*.

    Retrieves relevant articles from the FAISS index, asks the LLM for a
    synthesis grounded in the strongest matches (tier 1), asks a second
    chain for general follow-up questions based on the weaker matches
    (tier 2), and writes the combined text into the last chat turn.

    Parameters
    ----------
    chatlog : list
        Gradio chat history; ``chatlog[-1][0]`` is the pending user query.
        NOTE(review): ``chatlog[-1][1] = response`` below relies on Gradio
        delivering history entries as lists (tuples would raise) — confirm
        against the installed gradio version.

    Returns
    -------
    The updated chatlog (also mutated in place).
    """
    global mp_docs

    print(f"Chatlog qa: {chatlog}")
    query = chatlog[-1][0]
    print(db)

    thoughts = retrieve_thoughts(query)
    # BUGFIX: the original `if not(thoughts)` could never fire because
    # retrieve_thoughts always returns a dict with both keys (truthy even
    # when the tiers are empty). Fall back to the cached docs only when
    # *both* tiers came back empty; otherwise refresh the cache.
    if not (thoughts['tier 1'] or thoughts['tier 2']):
        if mp_docs:
            thoughts = mp_docs
    else:
        mp_docs = thoughts

    # Render each document as a title + content snippet for prompting.
    tier_1 = [f"title: {thought.metadata['title']}\n Content: {thought.page_content}"
              for thought in thoughts['tier 1']]
    tier_2 = [f"title: {thought.metadata['title']}\n Content: {thought.page_content}"
              for thought in thoughts['tier 2']]

    print(f"QUERY: {query}\nTIER 1: {tier_1}\nTIER2: {tier_2}")

    # Synthesis generation (prompt strings kept byte-identical to preserve
    # model behavior).
    session_prompt = """ A bot that is open to discussions about different cultural, philosophical and political exchanges. You will use do different analysis to the articles provided to me. Stay truthful and if you weren't provided any resources give your oppinion only."""
    task = "Extract relevant content to the query, and create a 300-word synthesis on the content is providing relevant to the query. Identify Themes for articles given in bullet points format. Expand on each theme and share specific quotes from the authors."

    prompt = PromptTemplate(
        input_variables=["query", "task", "session_prompt", "articles"],
        template="""
    You are a {session_prompt}
    {task}

    query: {query}

    Articles:
    {articles}

    Make sure to quote the article used if the argument corresponds to the query.
    Use careful reasoning to explain your answer and give your conclusion about this.
    """,
    )

    chain = LLMChain(llm=llm, prompt=prompt)
    response = chain.run(query=query, articles="\n".join(tier_1),
                         session_prompt=session_prompt, task=task)

    # Generate related questions from the weaker (tier 2) matches.
    prompt_q = PromptTemplate(
        input_variables=["session_prompt", "articles"],
        template="""
    You are a {session_prompt}
    Give general/global questions related the following articles:

    Articles:
    {articles}

    Make sure not to ask specific questions keep them general.
    """,
    )

    chain_q = LLMChain(llm=llm, prompt=prompt_q)
    questions = chain_q.run(session_prompt=session_prompt, articles="\n".join(tier_2))

    # BUGFIX: `questions.index('1')` raised ValueError whenever the model
    # did not number its questions; keep the full text in that case.
    idx = questions.find('1')
    if idx != -1:
        questions = questions[idx:]

    # TO-DO: initiate models in another function, refactor code to be reusable
    response = response + f"\n{questions}"

    chatlog[-1][1] = response
    return chatlog
| 131 |
+
|
| 132 |
+
def flush():
    """Reset handler for the Clear button: returning None empties the chatbot."""
    return None
| 135 |
+
|
| 136 |
+
with gr.Blocks() as demo:
    # Main chat display.
    chatbot = gr.Chatbot([], elem_id="chatbot").style(height=750)

    with gr.Row():
        with gr.Column(scale=0.65):
            # User input box; cleared by add_text's second return value.
            txt = gr.components.Textbox(placeholder="Ask me anything", show_label=False)

        with gr.Column(scale=0.15, min_width=0):
            # File-upload button (its upload handler is currently disabled).
            btn = gr.UploadButton("📁", file_types=["text"], file_count='multiple')

        with gr.Column(scale=0.15, min_width=0):
            send_btn = gr.Button("📨")

    with gr.Row():
        with gr.Column():
            clear = gr.Button("Clear")

    # Hidden holder for uploaded-document text; reset after each exchange.
    pdf_content = gr.Textbox("", visible=False)

    # Pressing Enter: record the user message, then generate the answer,
    # then reset the hidden upload buffer.
    txt.submit(add_text, [chatbot, txt], [chatbot, txt]).then(
        qa_retrieve, [chatbot], chatbot
    ).then(lambda: None, outputs=[pdf_content])

    # The send button mirrors the Enter-key flow.
    send_btn.click(add_text, [chatbot, txt], [chatbot, txt]).then(
        qa_retrieve, [chatbot], chatbot
    ).then(lambda: None, outputs=[pdf_content])

    # Clear wipes the chat display.
    clear.click(flush, None, outputs=chatbot, queue=False)

demo.queue(concurrency_count=4)
demo.launch()
db_full/index.faiss
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d0650f87f0b1cc577ebd8b57823cda7f94959275e0cb58743ba5df9fe0499542
|
| 3 |
+
size 112687149
|
db_full/index.pkl
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:30b520b9b7231267a1908b0484ad1ae865e62f77a40ec1bcb6805ba19020ae7f
|
| 3 |
+
size 31576557
|
requirements.txt
ADDED
|
@@ -0,0 +1,8 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
pandas
|
| 2 |
+
langchain
|
| 3 |
+
openai
|
| 4 |
+
FAISS-gpu
|
| 5 |
+
tiktoken
|
| 6 |
+
transformers
|
| 7 |
+
sentence_transformers
|
| 8 |
+
|