# Load the model directly (tokenizer + causal-LM weights).
from transformers import AutoTokenizer, AutoModelForCausalLM

# Downloads from the Hugging Face Hub on first use, then loads from cache.
tokenizer = AutoTokenizer.from_pretrained("ConicCat/Nemo-Humanities")
model = AutoModelForCausalLM.from_pretrained("ConicCat/Nemo-Humanities")

# Single-turn chat prompt.
messages = [
    {"role": "user", "content": "Who are you?"},
]

# Render the chat template straight to token IDs and move them to the
# model's device (CPU or GPU, whichever the model was placed on).
inputs = tokenizer.apply_chat_template(
    messages,
    add_generation_prompt=True,
    tokenize=True,
    return_dict=True,
    return_tensors="pt",
).to(model.device)

outputs = model.generate(**inputs, max_new_tokens=40)
# Decode only the newly generated tokens: slice off the prompt portion,
# whose length is the input sequence length.
print(tokenizer.decode(outputs[0][inputs["input_ids"].shape[-1]:]))
A finetune of Nemo produced to test the theory that, because the humanities (writing, social intelligence, roleplay, and theory of mind) seem to cross-generalize, training on all of them at once is superior to focusing on any one field.
- Downloads last month
- 3
# Use a pipeline as a high-level helper.
from transformers import pipeline

# text-generation pipelines accept a chat-style list of messages directly.
pipe = pipeline("text-generation", model="ConicCat/Nemo-Humanities")

messages = [
    {"role": "user", "content": "Who are you?"},
]
pipe(messages)