# Load the tokenizer and model directly.
# NOTE(review): the original paste had the web-page text "Quick Links" fused onto
# the model line (a SyntaxError) and the pipeline example collapsed into a single
# comment; both are repaired here.
from transformers import AutoTokenizer, AutoModel

tokenizer = AutoTokenizer.from_pretrained("bumblebee-testing/tiny-random-GPT2Model-sharded")
model = AutoModel.from_pretrained("bumblebee-testing/tiny-random-GPT2Model-sharded")

# Alternative: use a pipeline as a high-level helper for feature extraction.
from transformers import pipeline

pipe = pipeline("feature-extraction", model="bumblebee-testing/tiny-random-GPT2Model-sharded")