---
library_name: transformers
pipeline_tag: text-generation
tags:
- fine-tuned
- affine
- text-generation
- causal-lm
license: apache-2.0
---
# affine-train-1
## Usage
# Load the fine-tuned causal language model and its tokenizer from the Hub.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "james-unicorn/affine-train-1"
tok = AutoTokenizer.from_pretrained(repo_id)
lm = AutoModelForCausalLM.from_pretrained(repo_id)

# Generate text
enc = tok("Hello, how are you?", return_tensors="pt")
gen_ids = lm.generate(**enc, max_new_tokens=100)
print(tok.decode(gen_ids[0], skip_special_tokens=True))