alexmarques committed on
Commit
edc122e
·
verified ·
1 Parent(s): 8b65aea

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +2 -1
README.md CHANGED
@@ -42,6 +42,7 @@ from transformers import AutoTokenizer
42
 
43
  model_id = "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a16"
44
  number_gpus = 1
 
45
 
46
  sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
47
 
@@ -54,7 +55,7 @@ messages = [
54
 
55
  prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
56
 
57
- llm = LLM(model=model_id, tensor_parallel_size=number_gpus)
58
 
59
  outputs = llm.generate(prompts, sampling_params)
60
 
 
42
 
43
  model_id = "neuralmagic/Meta-Llama-3.1-8B-Instruct-quantized.w8a16"
44
  number_gpus = 1
45
+ max_model_len = 8192
46
 
47
  sampling_params = SamplingParams(temperature=0.6, top_p=0.9, max_tokens=256)
48
 
 
55
 
56
  prompts = tokenizer.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
57
 
58
+ llm = LLM(model=model_id, tensor_parallel_size=number_gpus, max_model_len=max_model_len)
59
 
60
  outputs = llm.generate(prompts, sampling_params)
61