from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
import gradio as gr
import torch

model_id = "tiiuae/falcon-rw-1b"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)
# Build a text-generation pipeline, using the first GPU if one is available.
pipe = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
    device=0 if torch.cuda.is_available() else -1,
)
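# Note: instead of manually stripping the echoed prompt below, the
# transformers text-generation pipeline also accepts return_full_text=False,
# which returns only the newly generated tokens:
#
#     result = pipe(prompt, max_new_tokens=64, do_sample=False,
#                   return_full_text=False)
#     reply = result[0]["generated_text"].strip()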
def chat(user_input, history):
    # Rebuild the conversation as a plain-text prompt. ChatInterface
    # passes history as (user, bot) pairs in its default tuple format.
    prompt = ""
    for user, bot in history:
        prompt += f"User: {user}\nBot: {bot}\n"
    prompt += f"User: {user_input}\nBot:"
    result = pipe(prompt, max_new_tokens=64, do_sample=False)
    output = result[0]["generated_text"]
    # The pipeline echoes the prompt, so keep only the text after the last
    # "Bot:" marker and drop any hallucinated follow-up "User:" turn.
    reply = output.split("Bot:")[-1].split("User:")[0].strip()
    # gr.ChatInterface manages history itself; the function must return
    # only the bot's reply string, not the updated history.
    return reply
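# Optional sanity check (a minimal sketch; the model must be downloaded
# first). Calling the function directly with an empty history should
# return a plain reply string:
#
#     print(chat("Hello, who are you?", []))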
# Wire the chat function into Gradio's ready-made chat UI.
gr.ChatInterface(
    fn=chat,
    chatbot=gr.Chatbot(),
    title="Tiny Falcon Chatbot",
    theme="default",
).launch()
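# launch() also takes standard Gradio options if the demo needs to be
# reachable beyond localhost, e.g.:
#
#     .launch(server_name="0.0.0.0", share=True)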