# Load the ReasoningEngine model and tokenizer directly from the Hugging Face Hub.
# NOTE: this downloads the model weights on first use (network + disk required).
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("Undi95/ReasoningEngine")
model = AutoModelForCausalLM.from_pretrained("Undi95/ReasoningEngine")
Merge recipe: https://huggingface.co/stabilityai/StableBeluga-13B combined with the reasoning LoRA adapter from https://huggingface.co/jondurbin/airoboros-lmoe-13b-2.1/tree/main/adapters/reasoning (weight: 0.42). Model name: ReasoningEngine.
For Dampf.
- Downloads last month: 808
# Use a pipeline as a high-level helper for text generation.
# NOTE: this downloads the model weights on first use (network + disk required).
from transformers import pipeline

pipe = pipeline("text-generation", model="Undi95/ReasoningEngine")