"""Gradio demo interface for the SARA retrieval-augmented chatbot."""
import os

import gradio as gr

from utils import generate_response, emb2info, pre_prompt, get_embedding

# Bypass any configured HTTP proxy for localhost so the locally served
# Gradio app (and any local model endpoint) stays reachable.
os.environ['NO_PROXY'] = '127.0.0.1'
# Log the working directory so relative asset paths (e.g. static/) can be checked.
print(os.getcwd())
| |
|
def predict(user_input, history=None):
    """Answer *user_input* via retrieval-augmented generation and extend the chat history.

    Parameters
    ----------
    user_input : str
        The user's question as typed in the textbox.
    history : list[tuple[str, str]] | None
        Accumulated (user, bot) message pairs; a fresh list is created when None.

    Returns
    -------
    tuple
        ``(history, history)`` — duplicated because Gradio routes one copy to
        the Chatbot display and one back into the State component.
    """
    # NOTE: the original signature used a mutable default (history=[]), which
    # is shared across calls and leaks turns between sessions; a None sentinel
    # with explicit initialization avoids that. Gradio always passes the State
    # value explicitly, so this is backward-compatible.
    if history is None:
        history = []

    # Embed the question, retrieve relevant context, and prepend it to the prompt.
    emb_user = get_embedding(user_input)
    info_to_add, retrieval_text = emb2info(emb_user)
    response = generate_response(pre_prompt + info_to_add + \
                                 "\n \n User : " + user_input + "\n Chat bot :")

    history.append((user_input, response))

    return history, history
| |
|
| |
|
# --- UI layout -------------------------------------------------------------
with gr.Blocks() as app:
    gr.Markdown(
        "## Bienvenue sur l'interface demo de SARA "
    )

    # Logo served through Gradio's static-file route.
    logo_URL = "file/static/logo_sara.png"
    image = "<center> <img src= {} width=150px></center>".format(logo_URL)
    gr.HTML(image)

    chatbot = gr.Chatbot()
    # Holds the running (user, bot) history between turns.
    state = gr.State([])

    with gr.Row():
        txt = gr.Textbox(show_label=False, placeholder="Entrez votre question").style(container=False)

    # Clickable example questions that pre-fill the textbox.
    gr.Examples(
        examples=[
            "Who should I call if I struggle with the GPU ? ",
            "Who can I call if I need help on diffusion models ? ",
            "Qui peut m'aider en NLP ?",
            "Qui est un specialiste de la segmentation d'image dans l'equipe ?",
        ],
        inputs=txt,
    )

    # Pressing Enter runs predict(txt, state) and routes the result to both
    # the chatbot display and the state component.
    txt.submit(predict, [txt, state], [chatbot, state])

    gr.HTML(
        "️<center> Created with ❤️ by @louis_ulmer & @aurelien_lac"
    )

# SECURITY NOTE(review): the basic-auth credentials are hard-coded in source;
# move them to environment variables or a secrets store before deploying
# beyond a local demo.
app.launch(auth=("cellule_ia", "pass"))