Update configs.py
Browse files — configs.py (+13, −9)
configs.py
CHANGED
|
@@ -18,18 +18,22 @@ model_info = {
|
|
| 18 |   original_prompt_template='<s>{prompt}',
| 19 |   interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
| 20 |   layers_format=llama_layers_format), # , load_in_8bit=True
| 21 |   'GPT-2 Small': dict(model_path='gpt2', original_prompt_template='{prompt}',
| 22 |   interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 23 |   layers_format=gpt_layers_format),
| 24 | - 'GPT-2 Medium': dict(model_path='gpt2-medium', original_prompt_template='{prompt}',
| 25 | -
| 26 | -
| 27 | - 'GPT-2 Large': dict(model_path='gpt2-large', original_prompt_template='{prompt}',
| 28 | -
| 29 | -
| 30 | - 'GPT-2 XL': dict(model_path='gpt2-xl', original_prompt_template='{prompt}',
| 31 | -
| 32 | -
| 33 |   'GPT-J 6B': dict(model_path='EleutherAI/gpt-j-6b', original_prompt_template='{prompt}',
| 34 |   interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 35 |   layers_format=gpt_layers_format),
|
|
|
| 18 |   original_prompt_template='<s>{prompt}',
| 19 |   interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
| 20 |   layers_format=llama_layers_format), # , load_in_8bit=True
| 21 | + 'LLAMA2-13B': dict(model_path='meta-llama/Llama-2-13b-chat-hf', token=os.environ['hf_token'],
| 22 | + original_prompt_template='<s>{prompt}',
| 23 | + interpretation_prompt_template='<s>[INST] [X] [/INST] {prompt}',
| 24 | + layers_format=llama_layers_format),
| 25 |   'GPT-2 Small': dict(model_path='gpt2', original_prompt_template='{prompt}',
| 26 |   interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 27 |   layers_format=gpt_layers_format),
| 28 | + # 'GPT-2 Medium': dict(model_path='gpt2-medium', original_prompt_template='{prompt}',
| 29 | + # interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 30 | + # layers_format=gpt_layers_format),
| 31 | + # 'GPT-2 Large': dict(model_path='gpt2-large', original_prompt_template='{prompt}',
| 32 | + # interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 33 | + # layers_format=gpt_layers_format),
| 34 | + # 'GPT-2 XL': dict(model_path='gpt2-xl', original_prompt_template='{prompt}',
| 35 | + # interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 36 | + # layers_format=gpt_layers_format),
| 37 |   'GPT-J 6B': dict(model_path='EleutherAI/gpt-j-6b', original_prompt_template='{prompt}',
| 38 |   interpretation_prompt_template='User: [X]\n\nAnswer: {prompt}',
| 39 |   layers_format=gpt_layers_format),