Arcana Qwen3-2.4B-A0.6B
Collection
Qwen3 MoE model • 5 items • Updated • 2
# Load the router tokenizer and classification model directly from the Hub.
from transformers import AutoTokenizer, AutoModelForSequenceClassification

tokenizer = AutoTokenizer.from_pretrained("suayptalha/MoE-Router-v2")
model = AutoModelForSequenceClassification.from_pretrained("suayptalha/MoE-Router-v2")

# Candidate routing categories for this classifier.
# NOTE(review): presumably these correspond to the model's output classes in
# this order — confirm against the model's id2label config before relying on it.
labels = ['code', 'if', 'math', 'medical']
# Use a pipeline as a high-level helper.
from transformers import pipeline

pipe = pipeline("text-classification", model="suayptalha/MoE-Router-v2")