import gradio as gr
from transformers import pipeline

# Build the Hugging Face text-generation pipeline once at module load so
# every chat request reuses the already-loaded h2oGPT weights.
generator = pipeline(
    task="text-generation",
    model="h2oai/h2ogpt-oig-oasst1-6.9b",
    device=-1,  # -1 = force CPU inference
)
def chat(prompt):
    """Generate an h2oGPT reply for *prompt*.

    Args:
        prompt: The user's input text from the Gradio textbox.

    Returns:
        The model's generated continuation as a string. Returns an empty
        string for blank input instead of sampling from nothing.
    """
    # Guard: an empty/whitespace prompt gives the model nothing to condition
    # on, so short-circuit rather than generate unconditioned text.
    if not prompt or not prompt.strip():
        return ""
    outputs = generator(
        prompt,
        max_new_tokens=200,
        temperature=0.7,
        do_sample=True,
        # Bug fix: by default the text-generation pipeline's
        # "generated_text" starts with the prompt itself, so the chatbot
        # echoed the user's question back before every answer.
        return_full_text=False,
    )
    return outputs[0]["generated_text"]
# Wire the chat function into a minimal single-textbox Gradio UI.
ui_settings = {
    "fn": chat,
    "inputs": "text",
    "outputs": "text",
    "title": "h2oGPT Chatbot",
    "description": "Ask anything to h2oGPT",
}
demo = gr.Interface(**ui_settings)

# Start the Gradio web server (blocks until the app is stopped).
demo.launch()