Text Generation
Transformers
PyTorch
English
gpt_neox
Text Generation
causal-lm
text-generation-inference
# Load model directly from the Hugging Face Hub using the Auto classes.
from transformers import AutoTokenizer, AutoModelForCausalLM

tokenizer = AutoTokenizer.from_pretrained("afterless/reverse-pythia-160m")
model = AutoModelForCausalLM.from_pretrained("afterless/reverse-pythia-160m")
# Reverse text generation with reverse-pythia-160m.
#
# The model was trained on token sequences in reversed order, so to
# "generate backwards" we: tokenize the prompt, flip the token ids,
# let the model generate (which extends the reversed sequence), then
# flip the output back into natural reading order.
import torch as t  # the snippet below uses `t.flip`; this import was missing
from transformers import GPTNeoXForCausalLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(
    "afterless/reverse-pythia-160m"
)
model = GPTNeoXForCausalLM.from_pretrained(
    "afterless/reverse-pythia-160m"
)

inputs = tokenizer(
    "but I told him, the cheese was the best",
    return_token_type_ids=False,
    return_tensors="pt",
)
# Reverse the prompt's token ids along the sequence dimension (dim 1)
# so the model sees the prompt back-to-front.
inputs['input_ids'] = t.flip(inputs.input_ids, (1,))
# Generate a continuation of the reversed sequence, then flip the whole
# result back so it reads left-to-right.
tokens = t.flip(model.generate(**inputs), (1,))
# Print the decoded text (the original bare expression only displayed a
# value in a REPL and did nothing when run as a script).
print(tokenizer.decode(tokens[0]))
- Downloads last month
- 11
# Use a pipeline as a high-level helper from transformers import pipeline pipe = pipeline("text-generation", model="afterless/reverse-pythia-160m")