# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM
tokenizer = AutoTokenizer.from_pretrained("h2oai/h2ogpt-16k-codellama-7b-python")
model = AutoModelForCausalLM.from_pretrained("h2oai/h2ogpt-16k-codellama-7b-python")
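With the tokenizer and model loaded as above, a minimal generation call could look like the sketch below; the prompt text and the max_new_tokens value are illustrative assumptions, not part of the model card.

# Minimal generation sketch, continuing from the objects loaded above.
# The prompt and max_new_tokens are illustrative assumptions.
prompt = "def fibonacci(n):"
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))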
# Use a pipeline as a high-level helper
from transformers import pipeline
pipe = pipeline("text-generation", model="h2oai/h2ogpt-16k-codellama-7b-python")
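The pipeline can then be called directly on a prompt string, as in the sketch below; the prompt and generation length are assumed values for illustration.

# Illustrative pipeline call; the prompt and max_new_tokens are assumptions.
result = pipe("def quicksort(arr):", max_new_tokens=128)
print(result[0]["generated_text"])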