change cuda
Browse files- app.py +2 -1
- minigpt4/conversation/conversation.py +3 -3
app.py
CHANGED
|
@@ -1005,6 +1005,7 @@ def handle_upload(files, chat_state, img_list):
|
|
| 1005 |
|
| 1006 |
# return gr.update(value=[]), gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), gr.update(interactive=False), chat_state, img_list
|
| 1007 |
# chatbot, image, text_input, upload_button, submit_button, chat_state, img_list
|
|
|
|
| 1008 |
def encode_img(img_list, modalities):
    """Delegate image encoding to the module-level ``chat`` object.

    Args:
        img_list: images uploaded for the current conversation.
        modalities: modality identifiers paired with the images.

    Returns:
        The ``(instruction, LR_image_list, HR_image_list, modalities)``
        4-tuple produced by ``chat.encode_img``.
    """
    # NOTE(review): `chat` is a module-level Chat instance defined elsewhere
    # in app.py — confirm it is initialized before this is called.
    encoded = chat.encode_img(img_list, modalities)
    instruction, lr_images, hr_images, modalities = encoded
    return instruction, lr_images, hr_images, modalities
|
|
@@ -1211,7 +1212,7 @@ def upload_img(chatbot,t1c_ax_file, t1_file, t2_file, fla_file, text_input, chat
|
|
| 1211 |
img_list = []
|
| 1212 |
llm_message = chat.upload_img(files, chat_state, img_list)
|
| 1213 |
|
| 1214 |
-
instruction, LR_image_list, HR_image_list, modalities = encode_img(img_list, modalities)
|
| 1215 |
# instruction, LR_image_list, HR_image_list, modalities = chat.encode_img(img_list, modalities)
|
| 1216 |
return chatbot, chat_state, instruction, LR_image_list, HR_image_list, modalities, gr.update(interactive=False),gr.update(interactive=False)
|
| 1217 |
|
|
|
|
| 1005 |
|
| 1006 |
# return gr.update(value=[]), gr.update(value=None, interactive=True), gr.update(placeholder='Please upload your image first', interactive=False),gr.update(value="Upload & Start Chat", interactive=True), gr.update(interactive=False), chat_state, img_list
|
| 1007 |
# chatbot, image, text_input, upload_button, submit_button, chat_state, img_list
|
| 1008 |
+
|
| 1009 |
def encode_img(img_list, modalities):
    """Delegate image encoding to the module-level ``chat`` object.

    Args:
        img_list: images uploaded for the current conversation.
        modalities: modality identifiers paired with the images.

    Returns:
        The ``(instruction, LR_image_list, HR_image_list, modalities)``
        4-tuple produced by ``chat.encode_img``.
    """
    # NOTE(review): `chat` is a module-level Chat instance defined elsewhere
    # in app.py — confirm it is initialized before this is called.
    encoded = chat.encode_img(img_list, modalities)
    instruction, lr_images, hr_images, modalities = encoded
    return instruction, lr_images, hr_images, modalities
|
|
|
|
| 1212 |
img_list = []
|
| 1213 |
llm_message = chat.upload_img(files, chat_state, img_list)
|
| 1214 |
|
| 1215 |
+
instruction, LR_image_list, HR_image_list, modalities = encode_img(img_list, modalities)
LR_image_list = LR_image_list.to('cuda')  # stacked tensor per conversation.py torch.stack
HR_image_list = HR_image_list.to('cuda')
|
| 1216 |
# instruction, LR_image_list, HR_image_list, modalities = chat.encode_img(img_list, modalities)
|
| 1217 |
return chatbot, chat_state, instruction, LR_image_list, HR_image_list, modalities, gr.update(interactive=False),gr.update(interactive=False)
|
| 1218 |
|
minigpt4/conversation/conversation.py
CHANGED
|
@@ -298,7 +298,7 @@ def images_process(image_list,images,modalities=None):#get_item
|
|
| 298 |
|
| 299 |
# aug_probability = np.random.rand()
|
| 300 |
|
| 301 |
-
image_sequence = image_sequence.squeeze()
|
| 302 |
|
| 303 |
image_sequence_LR = our_data_resize(image_sequence) # [32, 256, 256] -> [32, 224, 224]
|
| 304 |
|
|
@@ -334,10 +334,10 @@ def images_process(image_list,images,modalities=None):#get_item
|
|
| 334 |
|
| 335 |
|
| 336 |
|
| 337 |
-
LR_image_list = torch.stack(LR_image_list,dim=0)
|
| 338 |
|
| 339 |
if HR_resolution==True and HR_image_list!=[]:
|
| 340 |
-
HR_image_list = torch.stack(HR_image_list, dim=0)
|
| 341 |
else:
|
| 342 |
HR_resolution=False
|
| 343 |
HR_image_list = []
|
|
|
|
| 298 |
|
| 299 |
# aug_probability = np.random.rand()
|
| 300 |
|
| 301 |
+
image_sequence = image_sequence.squeeze() # torch.Size([32, 630, 637])
|
| 302 |
|
| 303 |
image_sequence_LR = our_data_resize(image_sequence) # [32, 256, 256] -> [32, 224, 224]
|
| 304 |
|
|
|
|
| 334 |
|
| 335 |
|
| 336 |
|
| 337 |
+
LR_image_list = torch.stack(LR_image_list,dim=0) # torch.Size([7, 32, 3, 224, 224])
|
| 338 |
|
| 339 |
if HR_resolution==True and HR_image_list!=[]:
|
| 340 |
+
HR_image_list = torch.stack(HR_image_list, dim=0) # torch.Size([7, 128, 3, 224, 224])
|
| 341 |
else:
|
| 342 |
HR_resolution=False
|
| 343 |
HR_image_list = []
|