```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("FreedomIntelligence/PlatoLM-7B")
model = AutoModelForCausalLM.from_pretrained("FreedomIntelligence/PlatoLM-7B")
```
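Once loaded, the model and tokenizer can be used for generation. Below is a minimal sketch; the plain-text prompt is illustrative only, and the model may expect a specific conversation template (see the repository linked below for details).

```python
# Minimal generation sketch. The raw prompt is illustrative; the model may
# expect its own conversation template (see the project repository).
inputs = tokenizer("Hello, who are you?", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=64)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```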
This version is based on Llama-2-7b; more versions are coming soon.

For more details, please see: https://github.com/FreedomIntelligence/PlatoLM
```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="FreedomIntelligence/PlatoLM-7B")
```
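The pipeline can then be called directly on a prompt. The prompt and generation parameters below are illustrative, not prescribed by the model card.

```python
# Illustrative call; max_new_tokens is an arbitrary choice.
result = pipe("Hello, who are you?", max_new_tokens=64)
print(result[0]["generated_text"])
```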