```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Bitsy/Not-LLaMA-7B-Pytorch-Transformer-Compatible")
model = AutoModelForCausalLM.from_pretrained("Bitsy/Not-LLaMA-7B-Pytorch-Transformer-Compatible")
```
This is NOT the recently released LLaMA model converted to work with Transformers. It is NOT that. Simply use this model as you would any other. Below is an example:
```python
import transformers

# Note: released versions of transformers (>= 4.28) name these classes
# LlamaTokenizer / LlamaForCausalLM rather than LLaMATokenizer / LLaMAForCausalLM.
tokenizer = transformers.LlamaTokenizer.from_pretrained("Bitsy/Not-LLaMA-7B-Pytorch-Transformer-Compatible")
model = transformers.LlamaForCausalLM.from_pretrained("Bitsy/Not-LLaMA-7B-Pytorch-Transformer-Compatible")
```
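Once loaded, a short generation call is an easy sanity check. The snippet below is a minimal sketch; the prompt and generation settings are illustrative and not part of the original card:

```python
# Minimal generation sketch using the tokenizer/model loaded above;
# the prompt and max_new_tokens value are illustrative choices.
inputs = tokenizer("The quick brown fox", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=32)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```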
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="Bitsy/Not-LLaMA-7B-Pytorch-Transformer-Compatible")
```
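For a quick test, the pipeline object can then be called directly. The prompt and settings below are illustrative, not from the card:

```python
# Generate a short continuation; the text-generation pipeline returns
# a list of dicts with a "generated_text" key.
result = pipe("Once upon a time", max_new_tokens=32)
print(result[0]["generated_text"])
```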