# Gradio chatbot backed by the instruction-tuned google/gemma-3-1b-it model.
from gradio import ChatInterface
from transformers import AutoModelForCausalLM, AutoTokenizer

MODEL_NAME = 'google/gemma-3-1b-it'

# Load once at module import; reloading per request would be prohibitively slow.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)


def respond(message, chat_history):
    """Return the model's reply to *message*, conditioned on the chat so far.

    Args:
        message: The latest user message (str).
        chat_history: Prior turns as supplied by ``gr.ChatInterface`` with
            ``type='messages'`` — a list of ``{'role', 'content'}`` dicts.

    Returns:
        The assistant's generated reply as a plain string, without the
        prompt being echoed back.
    """
    # Build the conversation in the chat format the instruction-tuned model
    # was trained on — feeding raw text to an -it model yields poor output
    # and discards all conversational context.
    messages = list(chat_history or [])
    messages.append({'role': 'user', 'content': message})
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors='pt',
    )
    outputs = model.generate(input_ids, max_new_tokens=100)
    # generate() returns prompt + completion; slice off the prompt tokens so
    # only the newly generated reply is decoded.
    reply_ids = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(reply_ids, skip_special_tokens=True)


# ChatInterface wires the (message, history) callback correctly. The original
# Interface(inputs=Chatbot, outputs=Chatbot) pairing is invalid: Chatbot is a
# display component, and Interface would drop the history argument.
iface = ChatInterface(fn=respond, type='messages')

# Launch the app only when run as a script, not on import.
if __name__ == '__main__':
    iface.launch()