Spaces:
Sleeping
Sleeping
File size: 4,495 Bytes
292920a |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 |
import gradio as gr
from backend import BackEnd
import argparse
import configparser
class Demo:
    """Gradio front-end for the BackEnd question-answering service.

    Builds a bilingual (French/English) chat UI, routes each user query
    through the backend, and displays the structured answer, rationale,
    source, and retrieved documents.
    """

    def __init__(self, config):
        """Initialize the demo.

        Parameters
        ----------
        config : configparser.ConfigParser
            Backend configuration; must contain ``[General] language``.

        Raises
        ------
        ValueError
            If the configured language is not French or English.
        """
        self.config = config
        self.backend = BackEnd(self.config)
        # Normalize e.g. "French" / "english" to a two-letter code.
        self.lang = self.config['General']['language'].lower()[:2]
        # Fail fast here instead of crashing later with an unbound
        # variable inside process_query() / run_demo().
        if self.lang not in ('fr', 'en'):
            raise ValueError(
                f"Unsupported language {self.lang!r}: expected 'fr' or 'en'")

    def process_query(self, history, query):
        """Answer *query* via the backend and update the chat state.

        Parameters
        ----------
        history : list
            List of (user, bot) message pairs used by ``gr.Chatbot``.
        query : str
            The user's question.

        Returns
        -------
        tuple
            (history, "", response, rationale, source, documents) — the
            empty string clears the input textbox; *documents* is the
            retrieved context joined into one display string.
        """
        res, context = self.backend.process_query(query)
        documents = '\n\n'.join(context)
        # The backend localizes its response keys; pick the right ones.
        # (Previously this read a bare `lang`, raising NameError for 'en'.)
        if self.lang == 'fr':
            response = res['réponse']
            rationale = res['justification']
        else:  # 'en' — guaranteed by the __init__ validation
            response = res['answer']
            rationale = res['rationale']
        source = res['source']
        history.append((query, response))
        return history, "", response, rationale, source, documents

    def run_demo(self):
        """Build the UI for the configured language and launch it (blocking)."""
        if self.lang == 'fr':
            with gr.Blocks(theme=gr.themes.Glass()) as demo:
                gr.Image(value='crim_logo.png')
                gr.Markdown("## Démonstration d'IA générative")
                chatbot = gr.Chatbot(label="Conversation", height=400)
                gr.Markdown("Saisissez une requête ci-dessous et voyez la réponse et le raisonnement du système.")
                with gr.Row():
                    query_input = gr.Textbox(
                        show_label=False,
                        placeholder="Tapez quelque chose ...",
                        lines=1
                    )
                    send_btn = gr.Button("Envoyer", scale=0)
                gr.Markdown("### Dernière réponse")
                with gr.Row():
                    answer_output = gr.Textbox(label="Réponse", lines=3, interactive=False)
                with gr.Row():
                    reasoning_output = gr.Textbox(label="Raisonnement du système", lines=5, interactive=False)
                with gr.Row():
                    source_output = gr.Textbox(label="Source", interactive=False)
                with gr.Accordion("Documents récupérés", open=False):
                    docs_output = gr.Textbox(label="Documents justificatifs", interactive=False, lines=30)
                inputs = [chatbot, query_input]
                outputs = [chatbot, query_input, answer_output, reasoning_output, source_output, docs_output]
                # Enter key and Send button both submit the query.
                query_input.submit(fn=self.process_query, inputs=inputs, outputs=outputs)
                send_btn.click(fn=self.process_query, inputs=inputs, outputs=outputs)
        else:  # 'en' — guaranteed by the __init__ validation
            with gr.Blocks(theme=gr.themes.Glass()) as demo:
                gr.Image(value='crim_logo.png')
                gr.Markdown("## Generative AI Chat Demo with Structured Outputs")
                chatbot = gr.Chatbot(label="Conversation", height=400)
                with gr.Row():
                    query_input = gr.Textbox(
                        show_label=False,
                        placeholder="Type your query here and press Enter...",
                        lines=1
                    )
                    send_btn = gr.Button("Send", scale=0)
                gr.Markdown("### Latest Response Details")
                with gr.Row():
                    answer_output = gr.Textbox(label="Answer", interactive=False)
                with gr.Row():
                    reasoning_output = gr.Textbox(label="System Reasoning", interactive=False)
                with gr.Row():
                    source_output = gr.Textbox(label="Source", interactive=False)
                with gr.Accordion("Retrieved Documents", open=False):
                    docs_output = gr.Textbox(label="Supporting Documents", interactive=False, lines=30)
                inputs = [chatbot, query_input]
                outputs = [chatbot, query_input, answer_output, reasoning_output, source_output, docs_output]
                query_input.submit(fn=self.process_query, inputs=inputs, outputs=outputs)
                send_btn.click(fn=self.process_query, inputs=inputs, outputs=outputs)
        # `demo` is always bound here because the language was validated.
        demo.launch()
def main():
    """Entry point: load the backend configuration and launch the demo UI."""
    parser = argparse.ArgumentParser(
        description='Launch the generative-AI chat demo.')
    # Optional with a default so the existing bare `python app.py`
    # invocation keeps working unchanged.
    parser.add_argument(
        '--config_file', type=str, default='config.ini',
        help='File containing the configuration for the backend (in .ini format)')
    args = parser.parse_args()
    config = configparser.ConfigParser()
    # ConfigParser.read() silently returns an empty config when the file
    # is missing; fail fast with a clear message instead.
    if not config.read(args.config_file):
        raise FileNotFoundError(
            f'Configuration file not found: {args.config_file}')
    demo = Demo(config)
    demo.run_demo()


if __name__ == "__main__":
    main()
|