Hugging Face Spaces status: Sleeping
| import gradio as gr | |
| from model import ask | |
| import torch | |
| torch.cuda.is_available() # This may help trigger ZeroGPU provisioning | |
def respond_with_rag(message, chat_history):
    """Answer a user message via the RAG pipeline and record the turn.

    Args:
        message: The user's question text from the input textbox.
        chat_history: The Gradio chat transcript — a list of
            (user_message, bot_response) tuples, mutated in place.

    Returns:
        A ("", chat_history) pair: the empty string clears the input
        textbox, and the updated history re-renders the Chatbot component.
    """
    # ask() is the project's RAG query entry point (see model.py).
    response = ask(message)
    chat_history.append((message, response))
    return "", chat_history
# Build the chat UI: a transcript view, a question box, and a clear button.
with gr.Blocks() as demo:
    gr.Markdown("# 🤖 Nutrition Chatbot with RAG")
    chatbot = gr.Chatbot()
    msg = gr.Textbox(
        label="Ask about nutrition",
        placeholder="e.g. What are good sources of protein?",
    )
    clear = gr.Button("Clear")

    # Enter in the textbox runs the RAG pipeline, clears the box, and
    # refreshes the transcript.
    msg.submit(respond_with_rag, [msg, chatbot], [msg, chatbot])
    # Clear resets the Chatbot to empty (None); queue=False makes the
    # reset immediate instead of waiting behind queued inference calls.
    clear.click(lambda: None, None, chatbot, queue=False)

demo.launch()