Girinath11 committed on
Commit
8e31cc1
·
verified ·
1 Parent(s): 4d76d02

Update model_usage.py

Browse files
Files changed (1) hide show
  1. model_usage.py +10 -6
model_usage.py CHANGED
@@ -20,11 +20,15 @@ for prompt in prompts:
20
  inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
21
  with torch.no_grad():
22
  outputs = model.generate(
23
- inputs,
24
- max_new_tokens=50,
25
- temperature=0.8,
26
- top_p=0.9,
27
- do_sample=True
28
- )
 
 
 
 
29
  text = tokenizer.decode(outputs[0], skip_special_tokens=True)
30
  print(f"{text}\n")
 
20
  inputs = tokenizer.encode(prompt, return_tensors="pt").to(device)
21
  with torch.no_grad():
22
  outputs = model.generate(
23
+ input_ids,
24
+ max_new_tokens=60,
25
+ temperature=0.7,
26
+ top_p=0.9,
27
+ top_k=50,
28
+ repetition_penalty=1.2,
29
+ no_repeat_ngram_size=3,
30
+ do_sample=True,
31
+ pad_token_id=tokenizer.eos_token_id
32
+ )
33
  text = tokenizer.decode(outputs[0], skip_special_tokens=True)
34
  print(f"{text}\n")