# MindSearchCamp3 / app.py
# New UI (commit 957f8e8), Chengdong CAO
import json
import os
import gradio as gr
import requests
from lagent.schema import AgentStatusCode
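# Launch the MindSearch backend API in the background; the Gradio frontend below
# streams results from it over HTTP (it is expected to serve http://localhost:8002/solve).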
os.system("python -m mindsearch.app --lang cn --model_format internlm_silicon &")
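# Module-level conversation state shared across Gradio callbacks.
# PLANNER_HISTORY accumulates the planner messages sent to the backend;
# SEARCHER_HISTORY is declared for symmetry but is not otherwise used here.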
PLANNER_HISTORY = []
SEARCHER_HISTORY = []
def rst_mem(history_planner: list, history_searcher: list):
'''
Reset the chatbot memory.
'''
history_planner = []
history_searcher = []
if PLANNER_HISTORY:
PLANNER_HISTORY.clear()
return history_planner, history_searcher
def format_response(gr_history, agent_return):
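    """Render a streamed agent update into the Gradio chat history in place."""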
if agent_return['state'] in [
AgentStatusCode.STREAM_ING, AgentStatusCode.ANSWER_ING
]:
gr_history[-1][1] = agent_return['response']
elif agent_return['state'] == AgentStatusCode.PLUGIN_START:
thought = gr_history[-1][1].split('```')[0]
if agent_return['response'].startswith('```'):
gr_history[-1][1] = thought + '\n' + agent_return['response']
elif agent_return['state'] == AgentStatusCode.PLUGIN_END:
thought = gr_history[-1][1].split('```')[0]
if isinstance(agent_return['response'], dict):
            payload = json.dumps(agent_return['response'], ensure_ascii=False, indent=4)
            gr_history[-1][1] = thought + '\n' + f'```json\n{payload}\n```'
elif agent_return['state'] == AgentStatusCode.PLUGIN_RETURN:
assert agent_return['inner_steps'][-1]['role'] == 'environment'
item = agent_return['inner_steps'][-1]
gr_history.append([
None,
f"```json\n{json.dumps(item['content'], ensure_ascii=False, indent=4)}\n```"
])
gr_history.append([None, ''])
return
def predict(history_planner, history_searcher):
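    """Post the planner history to the MindSearch backend and stream planner/searcher updates back to the UI."""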
def streaming(raw_response):
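        # Iterate over the server-sent-event stream from the backend:
        # skip keep-alive pings, strip the 'data: ' prefix and yield the
        # decoded JSON payload as (agent_return, current_node).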
for chunk in raw_response.iter_lines(chunk_size=8192,
decode_unicode=False,
delimiter=b'\n'):
if chunk:
decoded = chunk.decode('utf-8')
if decoded == '\r':
continue
if decoded[:6] == 'data: ':
decoded = decoded[6:]
elif decoded.startswith(': ping - '):
continue
response = json.loads(decoded)
yield (response['response'], response['current_node'])
global PLANNER_HISTORY
PLANNER_HISTORY.append(dict(role='user', content=history_planner[-1][0]))
new_search_turn = True
url = 'http://localhost:8002/solve'
headers = {'Content-Type': 'application/json'}
data = {'inputs': PLANNER_HISTORY}
raw_response = requests.post(url,
headers=headers,
data=json.dumps(data),
timeout=20,
stream=True)
for resp in streaming(raw_response):
agent_return, node_name = resp
if node_name:
if node_name in ['root', 'response']:
continue
agent_return = agent_return['nodes'][node_name]['detail']
if new_search_turn:
history_searcher.append([agent_return['content'], ''])
new_search_turn = False
format_response(history_searcher, agent_return)
if agent_return['state'] == AgentStatusCode.END:
new_search_turn = True
yield history_planner, history_searcher
else:
new_search_turn = True
format_response(history_planner, agent_return)
if agent_return['state'] == AgentStatusCode.END:
PLANNER_HISTORY = agent_return['inner_steps']
yield history_planner, history_searcher
return history_planner, history_searcher
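# Build the Gradio UI. The custom CSS below styles the buttons, the input textbox
# and the two side-by-side chatbots (planner / searcher) of the new UI.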
with gr.Blocks(css="""
.gr-button {
background-color: #4A90E2;
color: white;
border-radius: 10px;
padding: 10px 20px;
font-size: 16px;
border: none;
}
.gr-button:hover {
background-color: #357ABD;
}
.gr-textbox {
border: 2px solid #4A90E2;
border-radius: 5px;
padding: 10px;
font-size: 14px;
}
.gr-row {
margin-bottom: 10px;
}
.gr-chatbot {
border: 2px solid #4A90E2;
border-radius: 10px;
padding: 10px;
}
""") as demo:
gr.HTML("""<h1 align="center" style="color:#4A90E2;">MindSearch Gradio Demo</h1>""")
gr.HTML("""<p style="text-align: center; font-family: Arial, sans-serif; color: #333;">MindSearch is an open-source AI Search Engine Framework with Perplexity.ai Pro performance. You can deploy your own Perplexity.ai-style search engine using either closed-source LLMs (GPT, Claude) or open-source LLMs (InternLM2.5-7b-chat).</p>""")
gr.HTML("""
<div style="text-align: center; font-size: 16px; margin-bottom: 20px;">
<a href="https://github.com/InternLM/MindSearch" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">๐Ÿ”— GitHub</a>
<a href="https://arxiv.org/abs/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">๐Ÿ“„ Arxiv</a>
<a href="https://huggingface.co/papers/2407.20183" style="margin-right: 15px; text-decoration: none; color: #4A90E2;">๐Ÿ“š Hugging Face Papers</a>
<a href="https://huggingface.co/spaces/internlm/MindSearch" style="text-decoration: none; color: #4A90E2;">๐Ÿค— Hugging Face Demo</a>
</div>
""")
with gr.Row():
with gr.Column(scale=10):
with gr.Row():
with gr.Column():
planner = gr.Chatbot(label='Planner',
height=700,
show_label=True,
show_copy_button=True,
bubble_full_width=True,
render_markdown=True,
elem_id="planner")
with gr.Column():
searcher = gr.Chatbot(label='Searcher',
height=700,
show_label=True,
show_copy_button=True,
bubble_full_width=True,
render_markdown=True,
elem_id="searcher")
with gr.Row():
user_input = gr.Textbox(show_label=False,
                                        placeholder='Search the InternLM open-source ecosystem for me',
lines=5,
container=False,
elem_id="user_input")
with gr.Row():
with gr.Column(scale=2):
submitBtn = gr.Button('Submit', elem_id="submitBtn")
with gr.Column(scale=1, min_width=20):
emptyBtn = gr.Button('Clear History', elem_id="emptyBtn")
def user(query, history):
return '', history + [[query, '']]
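    # Wire up the events: Submit appends the query to the planner chat via `user`,
    # then streams backend output through `predict`; Clear History resets both chats.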
submitBtn.click(user, [user_input, planner], [user_input, planner],
queue=False).then(predict, [planner, searcher],
[planner, searcher])
emptyBtn.click(rst_mem, [planner, searcher], [planner, searcher],
queue=False)
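# Enable queuing so the streaming (generator) callbacks work, then launch the app.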
demo.queue()
demo.launch(server_name='0.0.0.0',
server_port=7860,
inbrowser=True,
share=True)