Update configs.py
Browse files — configs.py: +3 −2
configs.py
CHANGED
|
@@ -17,8 +17,9 @@ model_info = {
|
|
| 17 |
'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', token=os.environ['hf_token'],
|
| 18 |
original_prompt_template='<s>{prompt}',
|
| 19 |
interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
|
| 20 |
-                              layers_format=llama_layers_format),
|
| 21 |
-    'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf',
|
|
|
|
| 22 |
original_prompt_template='<s>{prompt}',
|
| 23 |
interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
|
| 24 |
layers_format=llama_layers_format),
|
|
|
|
| 17 |
'LLAMA2-7B': dict(model_path='meta-llama/Llama-2-7b-chat-hf', token=os.environ['hf_token'],
|
| 18 |
original_prompt_template='<s>{prompt}',
|
| 19 |
interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
|
| 20 |
+                              layers_format=llama_layers_format),
|
| 21 |
+    'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf',
|
| 22 |
+                         token=os.environ['hf_token'], device_map='cuda', load_in_8bit=True,
|
| 23 |
original_prompt_template='<s>{prompt}',
|
| 24 |
interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
|
| 25 |
layers_format=llama_layers_format),
|