change cuda: move models to GPU at load time, drop per-call `.to('cuda')`, set `@spaces.GPU(duration=180)`
Browse files- app.py +3 -3
- minigpt4/conversation/conversation.py +6 -4
app.py
CHANGED
|
@@ -943,7 +943,7 @@ model_cls = registry.get_model_class(model_config.arch)
|
|
| 943 |
# breakpoint()
|
| 944 |
# breakpoint()
|
| 945 |
|
| 946 |
-
model = model_cls.from_config(model_config)
|
| 947 |
|
| 948 |
|
| 949 |
import copy
|
|
@@ -951,7 +951,7 @@ import copy
|
|
| 951 |
# cfg_model_2 = cfg.model_cfg
|
| 952 |
# cfg_model_2['ckpt'] = '/home/ynwang/brain_llm/MiniGPT-4/eval_configs/minigpt4_eval_report.yaml'
|
| 953 |
model_config.ckpt = './checkpoint_1.pth'
|
| 954 |
-
model_2 = model_cls.from_config(model_config)
|
| 955 |
|
| 956 |
# model_2 = copy.deepcopy(model.to('cpu')).to('cuda:{}'.format(args.gpu_id+1))
|
| 957 |
# model.to('cuda:{}'.format(args.gpu_id))
|
|
@@ -1243,7 +1243,7 @@ def button_wait():
|
|
| 1243 |
return gr.update(interactive=False),gr.update(interactive=False),gr.update(interactive=False)
|
| 1244 |
|
| 1245 |
|
| 1246 |
-
@spaces.GPU
|
| 1247 |
def gradio_answer(chatbot, chat_state,instruction, lr_img_list, hr_img_list, modalities,model_type,age='',gender=''):
|
| 1248 |
print('instruction',instruction)
|
| 1249 |
try:
|
|
|
|
| 943 |
# breakpoint()
|
| 944 |
# breakpoint()
|
| 945 |
|
| 946 |
+
model = model_cls.from_config(model_config).to('cuda')
|
| 947 |
|
| 948 |
|
| 949 |
import copy
|
|
|
|
| 951 |
# cfg_model_2 = cfg.model_cfg
|
| 952 |
# cfg_model_2['ckpt'] = '/home/ynwang/brain_llm/MiniGPT-4/eval_configs/minigpt4_eval_report.yaml'
|
| 953 |
model_config.ckpt = './checkpoint_1.pth'
|
| 954 |
+
model_2 = model_cls.from_config(model_config).to('cuda')
|
| 955 |
|
| 956 |
# model_2 = copy.deepcopy(model.to('cpu')).to('cuda:{}'.format(args.gpu_id+1))
|
| 957 |
# model.to('cuda:{}'.format(args.gpu_id))
|
|
|
|
| 1243 |
return gr.update(interactive=False),gr.update(interactive=False),gr.update(interactive=False)
|
| 1244 |
|
| 1245 |
|
| 1246 |
+
@spaces.GPU(duration=180)
|
| 1247 |
def gradio_answer(chatbot, chat_state,instruction, lr_img_list, hr_img_list, modalities,model_type,age='',gender=''):
|
| 1248 |
print('instruction',instruction)
|
| 1249 |
try:
|
minigpt4/conversation/conversation.py
CHANGED
|
@@ -606,10 +606,12 @@ class Chat:
|
|
| 606 |
image_index_list=find_same_view(modalities) #
|
| 607 |
# breakpoint()
|
| 608 |
for img_idx in range(min(3,len(image_index_list))):
|
|
|
|
| 609 |
# images=[image_list_name[img_i] for img_i in image_index_list[img_idx]]
|
| 610 |
# sub_modalities=[modalities[img_i] for img_i in image_index_list[img_idx]]
|
| 611 |
# instruction,image_list,HR_image_list=images_process(images,v,sub_modalities)
|
| 612 |
# breakpoint()
|
|
|
|
| 613 |
instruction=instruction_pool[0]
|
| 614 |
modalities_temp=[modalities[i_] for i_ in image_index_list[img_idx]]
|
| 615 |
modality_num = len(image_index_list[img_idx])
|
|
@@ -676,13 +678,13 @@ class Chat:
|
|
| 676 |
# thread1.join()
|
| 677 |
|
| 678 |
try:
|
| 679 |
-
self.model_list[0].to('cuda')
|
| 680 |
answer = self.model_list[0].generate_step(lr_img_list_combination, confidence_instruction, hr_img_list_combination)
|
| 681 |
except Exception as e:
|
| 682 |
print('model_0 error',e)
|
| 683 |
|
| 684 |
try:
|
| 685 |
-
self.model_list[1].to('cuda')
|
| 686 |
answer_new = self.model_list[1].generate_step(lr_img_list_combination, instruction, hr_img_list_combination)
|
| 687 |
except Exception as e:
|
| 688 |
print('model_1 error',e)
|
|
@@ -779,13 +781,13 @@ class Chat:
|
|
| 779 |
# answer_new = answer_new[0]
|
| 780 |
|
| 781 |
try:
|
| 782 |
-
self.model_list[0].to('cuda')
|
| 783 |
answer = self.model_list[0].generate_step(lr_img_list_combination, confidence_instruction, hr_img_list_combination)
|
| 784 |
except Exception as e:
|
| 785 |
print('model_0 error',e)
|
| 786 |
|
| 787 |
try:
|
| 788 |
-
self.model_list[1].to('cuda')
|
| 789 |
answer_new = self.model_list[1].generate_step(lr_img_list_combination, instruction, hr_img_list_combination)
|
| 790 |
except Exception as e:
|
| 791 |
print('model_1 error',e)
|
|
|
|
| 606 |
image_index_list=find_same_view(modalities) #
|
| 607 |
# breakpoint()
|
| 608 |
for img_idx in range(min(3,len(image_index_list))):
|
| 609 |
+
|
| 610 |
# images=[image_list_name[img_i] for img_i in image_index_list[img_idx]]
|
| 611 |
# sub_modalities=[modalities[img_i] for img_i in image_index_list[img_idx]]
|
| 612 |
# instruction,image_list,HR_image_list=images_process(images,v,sub_modalities)
|
| 613 |
# breakpoint()
|
| 614 |
+
|
| 615 |
instruction=instruction_pool[0]
|
| 616 |
modalities_temp=[modalities[i_] for i_ in image_index_list[img_idx]]
|
| 617 |
modality_num = len(image_index_list[img_idx])
|
|
|
|
| 678 |
# thread1.join()
|
| 679 |
|
| 680 |
try:
|
| 681 |
+
# self.model_list[0].to('cuda')
|
| 682 |
answer = self.model_list[0].generate_step(lr_img_list_combination, confidence_instruction, hr_img_list_combination)
|
| 683 |
except Exception as e:
|
| 684 |
print('model_0 error',e)
|
| 685 |
|
| 686 |
try:
|
| 687 |
+
# self.model_list[1].to('cuda')
|
| 688 |
answer_new = self.model_list[1].generate_step(lr_img_list_combination, instruction, hr_img_list_combination)
|
| 689 |
except Exception as e:
|
| 690 |
print('model_1 error',e)
|
|
|
|
| 781 |
# answer_new = answer_new[0]
|
| 782 |
|
| 783 |
try:
|
| 784 |
+
# self.model_list[0].to('cuda')
|
| 785 |
answer = self.model_list[0].generate_step(lr_img_list_combination, confidence_instruction, hr_img_list_combination)
|
| 786 |
except Exception as e:
|
| 787 |
print('model_0 error',e)
|
| 788 |
|
| 789 |
try:
|
| 790 |
+
# self.model_list[1].to('cuda')
|
| 791 |
answer_new = self.model_list[1].generate_step(lr_img_list_combination, instruction, hr_img_list_combination)
|
| 792 |
except Exception as e:
|
| 793 |
print('model_1 error',e)
|