File size: 656 Bytes
868b518
3e314bf
6495bde
 
868b518
3e314bf
 
 
 
868b518
3e314bf
 
868b518
3e314bf
 
 
868b518
3e314bf
 
868b518
3e314bf
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
import gradio as gr
# Project-local RAG helper; expected to take a question string and return an answer string.
from model import ask
import torch
# Side-effecting probe at import time — kept deliberately at module top level.
torch.cuda.is_available()  # This may help trigger ZeroGPU provisioning

def respond_with_rag(message, chat_history):
    """Handle one chat turn: query the RAG model and append the exchange.

    Args:
        message: The user's question from the textbox.
        chat_history: List of (user, bot) message tuples shown in the
            Chatbot, or None/empty on the first turn (e.g. right after
            the Clear button resets the Chatbot value to None).

    Returns:
        Tuple of ("", updated_history): the empty string clears the input
        textbox; the history refreshes the Chatbot component.
    """
    # Normalize a None/falsy history so .append below cannot raise
    # AttributeError after a Clear click (which sets the Chatbot to None).
    chat_history = chat_history or []
    response = ask(message)  # RAG-backed answer from model.py
    chat_history.append((message, response))
    return "", chat_history

with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Nutrition Chatbot with RAG")

    # Conversation display plus a single-line question input.
    chatbot = gr.Chatbot()
    msg = gr.Textbox(label="Ask about nutrition", placeholder="e.g. What are good sources of protein?")
    clear = gr.Button("Clear")

    # Submitting the textbox runs the RAG handler; the returned "" clears
    # the textbox and the returned history updates the chatbot.
    msg.submit(respond_with_rag, [msg, chatbot], [msg, chatbot])
    # Clear resets the chatbot value to None; queue=False runs it immediately.
    clear.click(lambda: None, None, chatbot, queue=False)

# Guard the launch so importing this module (e.g. from tests or tooling)
# does not start a web server; running the file as a script is unchanged.
if __name__ == "__main__":
    demo.launch()