OpenNLPLab commited on
Commit
fc689fe
·
1 parent: 8d3a720

Update README.md

Browse files
Files changed (1):
  1. README.md (+3 −3)
README.md CHANGED
@@ -144,8 +144,8 @@ export use_triton=False
144
 
145
  ```python
146
  >>> from transformers import AutoModelForCausalLM, AutoTokenizer
147
- >>> tokenizer = AutoTokenizer.from_pretrained("OpenNLPLab/TransNormerLLM-1B", trust_remote_code=True)
148
- >>> model = AutoModelForCausalLM.from_pretrained("OpenNLPLab/TransNormerLLM-1B", device_map="auto", trust_remote_code=True)
149
  >>> inputs = tokenizer('今天是美好的一天', return_tensors='pt')
150
  >>> pred = model.generate(**inputs, max_new_tokens=2048, repetition_penalty=1.0)
151
  >>> print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
@@ -175,7 +175,7 @@ Training Data: `alpaca_data.json`. This sample data was drawn from [alpaca_data.
175
  torchrun \
176
  --nproc_per_node=8 \
177
  train.py \
178
- --model_name_or_path OpenNLPLab/TransNormerLLM-1B \
179
  --data_path ./alpaca_data.json \
180
  --output_dir output \
181
  --num_train_epochs 1 \
 
144
 
145
  ```python
146
  >>> from transformers import AutoModelForCausalLM, AutoTokenizer
147
+ >>> tokenizer = AutoTokenizer.from_pretrained("OpenNLPLab/TransNormerLLM-385M", trust_remote_code=True)
148
+ >>> model = AutoModelForCausalLM.from_pretrained("OpenNLPLab/TransNormerLLM-385M", device_map="auto", trust_remote_code=True)
149
  >>> inputs = tokenizer('今天是美好的一天', return_tensors='pt')
150
  >>> pred = model.generate(**inputs, max_new_tokens=2048, repetition_penalty=1.0)
151
  >>> print(tokenizer.decode(pred.cpu()[0], skip_special_tokens=True))
 
175
  torchrun \
176
  --nproc_per_node=8 \
177
  train.py \
178
+ --model_name_or_path OpenNLPLab/TransNormerLLM-385M \
179
  --data_path ./alpaca_data.json \
180
  --output_dir output \
181
  --num_train_epochs 1 \