Upload app.py
app.py
CHANGED
@@ -205,46 +205,51 @@ def deepl_memory(ss: SessionState) -> (SessionState):
 # DEEPL_API_KEY = os.getenv("DEEPL_API_KEY")
 
 def web_search(ss: SessionState, query) -> (SessionState, str):
-    search = DuckDuckGoSearchRun(verbose=True)
-    web_result = search(query)
 
-
-    names = []
-    names.extend(name_detector(query))
-    names.extend(name_detector(web_result))
-    if len(names)==0:
-        names = ""
-    elif len(names)==1:
-        names = names[0]
-    else:
-        names = ", ".join(names)
-
-    if ss.current_model == "gpt-3.5-turbo":
-        text = [query, web_result]
-        params = {
-            "auth_key": DEEPL_API_KEY,
-            "text": text,
-            "target_lang": "EN",
-            "source_lang": "JA",
-            "tag_handling": "xml",
-            "igonere_tags": "x",
-        }
-        request = requests.post(DEEPL_API_ENDPOINT, data=params)
-        response = request.json()
 
-
-    web_result =
-
+    search = DuckDuckGoSearchRun(verbose=True)
+
+    for i in range(3):
+        web_result = search(query)
+
+        # Extract person names
+        names = []
+        names.extend(name_detector(query))
+        names.extend(name_detector(web_result))
+        if len(names)==0:
+            names = ""
+        elif len(names)==1:
+            names = names[0]
+        else:
+            names = ", ".join(names)
+
+        if ss.current_model == "gpt-3.5-turbo":
+            text = [query, web_result]
+            params = {
+                "auth_key": DEEPL_API_KEY,
+                "text": text,
+                "target_lang": "EN",
+                "source_lang": "JA",
+                "tag_handling": "xml",
+                "ignore_tags": "x",
+            }
+            request = requests.post(DEEPL_API_ENDPOINT, data=params)
+            response = request.json()
+
+            query = response["translations"][0]["text"]
+            web_result = response["translations"][1]["text"]
+        web_result = ss.web_summary_chain({'query': query, 'context': web_result})['text']
+        if web_result != "NO INFO":
+            break
 
     if names != "":
         web_query = f"""
 {query}
-Use the following
-
-
+Use the following Suggested Answer Source as a reliable reference to answer the question above in Japanese. When translating names of people, refer to Names as a translation guide.
+Suggested Answer Source: {web_result}
+Names: {names}
 """.strip()
     else:
-        web_query = query + "\nUse the following
+        web_query = query + "\nUse the following Suggested Answer Source as a reliable reference to answer the question above in the Japanese.\n===\nSuggested Answer Source: " + web_result + "\n"
 
 
     return ss, web_query
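Note on the DeepL call in the hunk above: when ss.current_model is gpt-3.5-turbo, the Japanese query and the DuckDuckGo result are sent to DeepL in a single request, and the translations come back in input order, which is why the code reads translations[0] and translations[1]. A minimal sketch of that call, assuming DEEPL_API_ENDPOINT points at DeepL's /v2/translate endpoint (the constant is defined elsewhere in app.py and is not part of this diff):

import os
import requests

DEEPL_API_KEY = os.getenv("DEEPL_API_KEY")
# Assumed endpoint; the Space defines DEEPL_API_ENDPOINT outside this hunk.
DEEPL_API_ENDPOINT = "https://api-free.deepl.com/v2/translate"

def translate_ja_to_en(texts):
    """Translate several Japanese strings in one request; DeepL preserves input order."""
    params = {
        "auth_key": DEEPL_API_KEY,
        "text": texts,              # requests encodes a list as repeated text= form fields
        "target_lang": "EN",
        "source_lang": "JA",
        "tag_handling": "xml",
        "ignore_tags": "x",         # text wrapped in <x>...</x> is left untranslated
    }
    response = requests.post(DEEPL_API_ENDPOINT, data=params).json()
    # Response shape: {"translations": [{"detected_source_language": ..., "text": ...}, ...]}
    return [t["text"] for t in response["translations"]]

# query_en, web_result_en = translate_ja_to_en([query, web_result])

The hunk also corrects the old "igonere_tags" typo to "ignore_tags"; the diff itself does not show anything being wrapped in <x> tags, so how that option is exercised is not visible here.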
@@ -263,16 +268,17 @@ def web_search(ss: SessionState, query) -> (SessionState, str):
 
 # Tokens: OpenAI 104/ Llama 105 <- In Japanese: Tokens: OpenAI 191/ Llama 162
 sys_chat_message = """
-You are an outstanding AI concierge.
-them with many specific and detailed information in Japanese.
-do make up an answer and says
+You are an outstanding AI concierge. Understand the intent of the customer's questions based on
+the conversation history. Then, answer them with many specific and detailed information in Japanese.
+If you do not know the answer to a question, do make up an answer and says
+"誠に申し訳ございませんが、その点についてはわかりかねます".
 """.replace("\n", "")
 
 chat_common_format = """
 ===
 Question: {query}
 ===
-Conversation History
+Conversation History:
 {chat_history}
 ===
 日本語の回答: """
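For context, sys_chat_message and chat_common_format above are joined into model-specific prompt templates; the Llama-2 form is visible in the next hunk's header. A rough sketch with the strings abbreviated, and with the gpt-3.5-turbo variant assumed rather than shown in this diff:

sys_chat_message = "You are an outstanding AI concierge. ..."   # abbreviated
chat_common_format = """
===
Question: {query}
===
Conversation History:
{chat_history}
===
日本語の回答: """

# Llama-2 chat format: system prompt inside <<SYS>> tags within an [INST] block
# (this f-string appears verbatim in the following hunk header).
chat_template_llama2 = f"<s>[INST] <<SYS>>{sys_chat_message}<</SYS>>{chat_common_format} [/INST]"
# For gpt-3.5-turbo a plain concatenation would suffice (assumption, not shown in the diff).
chat_template_openai = sys_chat_message + chat_common_format

# Either template is later filled with runtime values, e.g.:
# prompt = chat_template_llama2.format(query=query, chat_history=chat_history)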
@@ -286,10 +292,9 @@ chat_template_llama2 = f"<s>[INST] <<SYS>>{sys_chat_message}<</SYS>>{chat_common
 # Tokens: OpenAI 113/ Llama 111 <- In Japanese: Tokens: OpenAI 256/ Llama 225
 sys_qa_message = """
 You are an AI concierge who carefully answers questions from customers based on references.
-
-Japanese using sentences extracted from the following references. If you do
-do not make up an answer and reply, "誠に申し訳ございませんが、その点についてはわかりかねます".
-Ignore Conversation History.
+Understand the intent of the customer's questions based on the conversation history. Then, give
+a specific answer in Japanese using sentences extracted from the following references. If you do
+not know the answer, do not make up an answer and reply, "誠に申し訳ございませんが、その点についてはわかりかねます".
 """.replace("\n", "")
 
 qa_common_format = """
@@ -297,7 +302,7 @@ qa_common_format = """
 Question: {query}
 References: {context}
 ===
-Conversation History
+Conversation History:
 {chat_history}
 ===
 日本語の回答: """
@@ -332,7 +337,8 @@ query_generator_template_llama2 = f"<s>[INST] <<SYS>>{query_generator_message}<<
 question_prompt_message = """
 From the following references, extract key information relevant to the question
 and summarize it in a natural English sentence with clear subject, verb, object,
-and complement.
+and complement. If there is no information in the reference that answers the question,
+do not summarize and simply answer "NO INFO"
 """.replace("\n", "")
 
 question_prompt_common_format = """
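The "NO INFO" sentinel added to question_prompt_message is what the retry loop in web_search checks before giving up on a search result. ss.web_summary_chain itself is constructed elsewhere in app.py; a hedged sketch of how it could be built with the legacy LangChain LLMChain API, using an assumed inline layout in place of question_prompt_common_format:

from langchain.chains import LLMChain
from langchain.chat_models import ChatOpenAI
from langchain.prompts import PromptTemplate

question_prompt_message = (
    "From the following references, extract key information relevant to the question "
    "and summarize it in a natural English sentence with clear subject, verb, object, "
    "and complement. If there is no information in the reference that answers the question, "
    'do not summarize and simply answer "NO INFO"'
)

# Assumed layout; the real file defines question_prompt_common_format for this part.
summary_prompt = PromptTemplate(
    input_variables=["query", "context"],
    template=question_prompt_message + "\n===\nQuestion: {query}\nReferences: {context}\n===\nAnswer: ",
)

web_summary_chain = LLMChain(llm=ChatOpenAI(temperature=0), prompt=summary_prompt)

# LLMChain's default output key is "text", matching the access pattern in web_search:
# summary = web_summary_chain({"query": query, "context": web_result})["text"]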
@@ -1058,8 +1064,8 @@ with gr.Blocks() as demo:
             )
         with gr.Column(scale=5):
             with gr.Row():
-                qa_flag = gr.Checkbox(label="QA mode", value=
-                web_flag = gr.Checkbox(label="Web Search", value=
+                qa_flag = gr.Checkbox(label="QA mode", value=False, min_width=60, interactive=True)
+                web_flag = gr.Checkbox(label="Web Search", value=True, min_width=60, interactive=True)
             with gr.Row():
                 query_send_btn = gr.Button(value="▶")
 
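The two checkboxes only declare UI state; the click handler that consumes them sits outside this diff. A minimal, self-contained sketch (the handler name and surrounding layout are assumptions) of how such flags are typically wired to a button in Gradio:

import gradio as gr

def on_send(query, qa_mode, use_web):
    # Placeholder handler: the Space's real callback routes the query through
    # web_search() and/or the QA chain depending on these flags.
    mode = "QA" if qa_mode else "chat"
    return f"[{mode}, web={use_web}] {query}"

with gr.Blocks() as demo:
    query_box = gr.Textbox(label="Question")
    with gr.Row():
        qa_flag = gr.Checkbox(label="QA mode", value=False, min_width=60, interactive=True)
        web_flag = gr.Checkbox(label="Web Search", value=True, min_width=60, interactive=True)
    with gr.Row():
        query_send_btn = gr.Button(value="▶")
    answer_box = gr.Textbox(label="Answer")
    query_send_btn.click(on_send, inputs=[query_box, qa_flag, web_flag], outputs=answer_box)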
@@ -1082,5 +1088,5 @@
 
 if __name__ == "__main__":
     demo.queue(concurrency_count=5)
-    demo.launch(debug=True
+    demo.launch(debug=True)
 