Spaces:
Paused
Paused
ValerianFourel committed on
Commit ·
b650f37
1
Parent(s): 913dabd
modify checkpoint
Browse files- app.py +4 -2
- eval_configs/minigptv2_eval.yaml +1 -1
- minigpt4/configs/models/minigpt_v2.yaml +0 -1
app.py
CHANGED
|
@@ -20,7 +20,7 @@ from minigpt4.common.registry import registry
|
|
| 20 |
from minigpt4.conversation.conversation import Conversation, SeparatorStyle, Chat
|
| 21 |
|
| 22 |
# imports modules for registration
|
| 23 |
-
from min交叉
|
| 24 |
|
| 25 |
---
|
| 26 |
|
|
@@ -86,7 +86,9 @@ args = parse_args()
|
|
| 86 |
cfg = Config(args)
|
| 87 |
|
| 88 |
device = 'cuda:{}'.format(args.gpu_id)
|
| 89 |
-
|
|
|
|
|
|
|
| 90 |
model_config = cfg.model_cfg
|
| 91 |
model_config.device_8bit = args.gpu_id
|
| 92 |
model_config.llama_model = llama_2_7b_chat_hf_path # Updated to use downloaded subfolder
|
|
|
|
| 20 |
from minigpt4.conversation.conversation import Conversation, SeparatorStyle, Chat
|
| 21 |
|
| 22 |
# imports modules for registration
|
| 23 |
+
#from min交叉
|
| 24 |
|
| 25 |
---
|
| 26 |
|
|
|
|
| 86 |
cfg = Config(args)
|
| 87 |
|
| 88 |
device = 'cuda:{}'.format(args.gpu_id)
|
| 89 |
+
# Add the specific checkpoint file to the path
|
| 90 |
+
checkpoint_file = "finetuning_3datasets_2025February25.pth"
|
| 91 |
+
cfg.model.ckpt = os.path.join(facevlm_finetune_path, checkpoint_file)
|
| 92 |
model_config = cfg.model_cfg
|
| 93 |
model_config.device_8bit = args.gpu_id
|
| 94 |
model_config.llama_model = llama_2_7b_chat_hf_path # Updated to use downloaded subfolder
|
eval_configs/minigptv2_eval.yaml
CHANGED
|
@@ -5,7 +5,7 @@ model:
|
|
| 5 |
end_sym: "</s>"
|
| 6 |
low_resource: True
|
| 7 |
prompt_template: '[INST] {} [/INST]'
|
| 8 |
-
ckpt: "/fast/vfourel/LMMmodels/minigptv2CheckPoints/minigptv2_checkpoint.pth"
|
| 9 |
lora_r: 64
|
| 10 |
lora_alpha: 16
|
| 11 |
|
|
|
|
| 5 |
end_sym: "</s>"
|
| 6 |
low_resource: True
|
| 7 |
prompt_template: '[INST] {} [/INST]'
|
| 8 |
+
# ckpt: "/fast/vfourel/LMMmodels/minigptv2CheckPoints/minigptv2_checkpoint.pth"
|
| 9 |
lora_r: 64
|
| 10 |
lora_alpha: 16
|
| 11 |
|
minigpt4/configs/models/minigpt_v2.yaml
CHANGED
|
@@ -11,7 +11,6 @@ model:
|
|
| 11 |
# generation configs
|
| 12 |
prompt: ""
|
| 13 |
|
| 14 |
-
llama_model: "/fast/vfourel/LMMmodels/Llama-2-7b-chat-hf"
|
| 15 |
lora_r: 64
|
| 16 |
lora_alpha: 16
|
| 17 |
|
|
|
|
| 11 |
# generation configs
|
| 12 |
prompt: ""
|
| 13 |
|
|
|
|
| 14 |
lora_r: 64
|
| 15 |
lora_alpha: 16
|
| 16 |
|