replit/replit-code-v1-3b fine-tuned on Open-Orca/OpenOrca.

Load the model and tokenizer directly:

```python
# Load model directly
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("matorus/replit-openorca", trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained("matorus/replit-openorca", trust_remote_code=True)
```
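A minimal generation sketch with the loaded model follows; the prompt text and decoding settings are illustrative assumptions, not a documented prompt format for this fine-tune:

```python
# Illustrative sketch: prompt wording and generation parameters are assumptions.
prompt = "Write a Python function that reverses a string."
inputs = tokenizer(prompt, return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=128, do_sample=True, temperature=0.2)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```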
Or use a pipeline as a high-level helper:

```python
# Use a pipeline as a high-level helper
from transformers import pipeline

pipe = pipeline("text-generation", model="matorus/replit-openorca", trust_remote_code=True)
```
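Calling the pipeline might look like the sketch below; the prompt and `max_new_tokens` value are arbitrary choices for illustration:

```python
# Illustrative pipeline call; prompt and max_new_tokens are assumptions.
result = pipe("Write a Python function that checks if a number is prime.", max_new_tokens=128)
print(result[0]["generated_text"])
```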