from transformers import AutoTokenizer, AutoModelForCausalLM
import torch
import gradio as gr

model_name = "tiiuae/falcon-rw-1b"  # Use falcon-7b-instruct for GPU
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name, trust_remote_code=True)

# Run on GPU when available, otherwise fall back to CPU
device = "cuda" if torch.cuda.is_available() else "cpu"
model.to(device)

def chat_with_falcon(prompt):
    inputs = tokenizer(prompt, return_tensors="pt").to(device)
    outputs = model.generate(
        **inputs,
        max_new_tokens=200,
        do_sample=True,
        temperature=0.7,
        pad_token_id=tokenizer.eos_token_id,  # Falcon has no pad token; silences a generate() warning
    )
    # Slice off the prompt tokens so the reply doesn't echo the user's input
    reply = tokenizer.decode(
        outputs[0][inputs["input_ids"].shape[-1]:],
        skip_special_tokens=True,
    )
    return reply

# Serves the demo locally (Gradio prints the URL, typically http://127.0.0.1:7860)
gr.Interface(
    fn=chat_with_falcon,
    inputs="text",
    outputs="text",
    title="Falcon Chatbot",
).launch()