procesaur committed (verified)
Commit f2fae1d · Parent: 3ab7abc

Update README.md

Files changed (1): README.md (+11 −11)
README.md CHANGED
@@ -45,17 +45,17 @@ pipeline_tag: summarization
 >>> prompt_lengths = inputs["input_ids"].shape[1]
 
 >>> with torch.no_grad():
->>> generated_ids = model.generate(
-input_ids=inputs["input_ids"],
-attention_mask=inputs["attention_mask"],
-max_new_tokens=100,
-no_repeat_ngram_size=3,
-num_beams=6,
-min_length = 30,
-length_penalty = -0.5,
-early_stopping = True,
-pad_token_id = tokenizer.pad_token_id,
-eos_token_id = tokenizer.pad_token_id)
+>>> generated_ids = model.generate(
+>>> input_ids=inputs["input_ids"],
+>>> attention_mask=inputs["attention_mask"],
+>>> max_new_tokens=100,
+>>> no_repeat_ngram_size=3,
+>>> num_beams=6,
+>>> min_length = 30,
+>>> length_penalty = -0.5,
+>>> early_stopping = True,
+>>> pad_token_id = tokenizer.pad_token_id,
+>>> eos_token_id = tokenizer.pad_token_id)
 
 >>> decoded_output = tokenizer.decode(generated_ids[0][prompt_length:], skip_special_tokens=True).strip()
 
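For reference, a minimal self-contained sketch of the snippet this commit reformats, assuming a Hugging Face causal LM checkpoint. The model id and prompt below are placeholders, not taken from the diff, and the generate() arguments mirror the README. Note the README assigns prompt_lengths but later slices with prompt_length; the sketch uses one name consistently.

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

model_name = "your-model-id"  # placeholder; the diff does not name the model
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForCausalLM.from_pretrained(model_name)

prompt = "Text to summarize..."  # placeholder input
inputs = tokenizer(prompt, return_tensors="pt")
prompt_length = inputs["input_ids"].shape[1]

with torch.no_grad():
    generated_ids = model.generate(
        input_ids=inputs["input_ids"],
        attention_mask=inputs["attention_mask"],
        max_new_tokens=100,        # cap on newly generated tokens
        no_repeat_ngram_size=3,    # block repeated trigrams
        num_beams=6,               # beam search width
        min_length=30,             # minimum total length before stopping
        length_penalty=-0.5,       # negative value favors shorter beams
        early_stopping=True,       # stop once all beams finish
        pad_token_id=tokenizer.pad_token_id,
        eos_token_id=tokenizer.pad_token_id,  # README uses the pad token as EOS
    )

# Decode only the tokens generated after the prompt
decoded_output = tokenizer.decode(
    generated_ids[0][prompt_length:], skip_special_tokens=True
).strip()
print(decoded_output)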