xtie committed on
Commit
319a5ab
·
1 Parent(s): a71593d

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +13 -11
README.md CHANGED
@@ -46,18 +46,20 @@ Indication: The patient is a 60-year old male with a history of xxx
46
  inputs = tokenizer(findings_info.replace('\n', ' '), padding="max_length", truncation=True, max_length=1024, return_tensors="pt")
47
  input_ids = inputs.input_ids.to("cuda")
48
  attention_mask = inputs.attention_mask.to("cuda")
49
- outputs = model.generate(input_ids, attention_mask=attention_mask, max_new_tokens=512,
50
- num_beam_groups=1,
51
- num_beams=4,
52
- do_sample=False,
53
- diversity_penalty=0.0,
54
- num_return_sequences=1,
55
- length_penalty=2.0,
56
- no_repeat_ngram_size=3,
57
- early_stopping=True)
58
-
 
 
59
  # all special tokens including will be removed
60
- output_str = tokenizer.batch_decode(outputs, skip_special_tokens=True) # get the generated impressions
61
  ```
62
 
63
 
 
46
  inputs = tokenizer(findings_info.replace('\n', ' '), padding="max_length", truncation=True, max_length=1024, return_tensors="pt")
47
  input_ids = inputs.input_ids.to("cuda")
48
  attention_mask = inputs.attention_mask.to("cuda")
49
+ outputs = model.generate(input_ids,
50
+ attention_mask=attention_mask,
51
+ max_new_tokens=512,
52
+ num_beam_groups=1,
53
+ num_beams=4,
54
+ do_sample=False,
55
+ diversity_penalty=0.0,
56
+ num_return_sequences=1,
57
+ length_penalty=2.0,
58
+ no_repeat_ngram_size=3,
59
+ early_stopping=True
60
+ )
61
  # all special tokens including will be removed
62
+ output_str = tokenizer.decode(outputs[0], skip_special_tokens=True) # get the generated impression (generate returns a batch; decode takes one sequence)
63
  ```
64
 
65