text
stringlengths 0
1.16k
|
|---|
1 *************
|
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 2.3375e-10, 5.1895e-12, 1.0003e-11, 2.2896e-11, 2.6047e-09,
|
7.0423e-08, 2.1380e-11], device='cuda:2', grad_fn=<SelectBackward0>)
|
最后的概率分布为: {True: tensor(7.3321e-08, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(5.9605e-08, device='cuda:2', grad_fn=<DivBackward0>)}
|
ANSWER0=VQA(image=LEFT,question='Is anyone sitting in the car?')
|
ANSWER1=EVAL(expr='not {ANSWER0}')
|
FINAL_ANSWER=RESULT(var=ANSWER1)
|
question: ['How many mittens are in the image?'], responses:['1']
|
torch.Size([13, 3, 448, 448])
|
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
|
[['1', '3', '4', '8', '6', '12', '2', '47']]
|
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
|
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
|
tensor([1.0000e+00, 5.8138e-10, 1.0588e-10, 2.3490e-10, 4.0744e-10, 7.4160e-09,
|
8.8144e-09, 1.3483e-10], device='cuda:3', grad_fn=<SoftmaxBackward0>)
|
1 *************
|
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 5.8138e-10, 1.0588e-10, 2.3490e-10, 4.0744e-10, 7.4160e-09,
|
8.8144e-09, 1.3483e-10], device='cuda:3', grad_fn=<SelectBackward0>)
|
最后的概率分布为: {True: tensor(1.7695e-08, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:3', grad_fn=<DivBackward0>)}
|
tensor([1.0000e+00, 7.3382e-07, 1.1049e-07, 1.3121e-12, 1.8897e-12, 6.8577e-10,
|
5.2155e-11, 9.0266e-08], device='cuda:1', grad_fn=<SoftmaxBackward0>)
|
no *************
|
['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock'] tensor([1.0000e+00, 7.3382e-07, 1.1049e-07, 1.3121e-12, 1.8897e-12, 6.8577e-10,
|
5.2155e-11, 9.0266e-08], device='cuda:1', grad_fn=<SelectBackward0>)
|
最后的概率分布为: {True: tensor(7.3382e-07, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(2.3842e-07, device='cuda:1', grad_fn=<DivBackward0>)}
|
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
|
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
|
question: ['Is anyone sitting in the car?'], responses:['no']
|
[('no', 0.1313955057270409), ('yes', 0.12592208734904367), ('no smoking', 0.12472972590078177), ('gone', 0.12376514658020793), ('man', 0.12367833016285167), ('meow', 0.1235796378467502), ('kia', 0.12347643720898455), ('no clock', 0.12345312922433942)]
|
[['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock']]
|
tensor([1.0000e+00, 3.5817e-10, 8.0175e-07, 6.9561e-12, 6.0446e-11, 4.7645e-08,
|
2.1660e-10, 1.3001e-06], device='cuda:0', grad_fn=<SoftmaxBackward0>)
|
no *************
|
['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock'] tensor([1.0000e+00, 3.5817e-10, 8.0175e-07, 6.9561e-12, 6.0446e-11, 4.7645e-08,
|
2.1660e-10, 1.3001e-06], device='cuda:0', grad_fn=<SelectBackward0>)
|
最后的概率分布为: {True: tensor(3.5817e-10, device='cuda:0', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(2.1458e-06, device='cuda:0', grad_fn=<DivBackward0>)}
|
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
|
tensor([1.0000e+00, 4.3634e-09, 9.7215e-07, 1.0045e-09, 6.7498e-09, 4.5900e-08,
|
8.7457e-10, 4.2249e-07], device='cuda:2', grad_fn=<SoftmaxBackward0>)
|
no *************
|
['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock'] tensor([1.0000e+00, 4.3634e-09, 9.7215e-07, 1.0045e-09, 6.7498e-09, 4.5900e-08,
|
8.7457e-10, 4.2249e-07], device='cuda:2', grad_fn=<SelectBackward0>)
|
最后的概率分布为: {True: tensor(1.0000, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(4.3634e-09, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(1.4305e-06, device='cuda:2', grad_fn=<DivBackward0>)}
|
[2024-10-24 10:49:44,842] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.36 | optimizer_gradients: 0.36 | optimizer_step: 0.33
|
[2024-10-24 10:49:44,842] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 5857.51 | backward_microstep: 11818.10 | backward_inner_microstep: 5464.12 | backward_allreduce_microstep: 6353.89 | step_microstep: 7.90
|
[2024-10-24 10:49:44,842] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 5857.52 | backward: 11818.09 | backward_inner: 5464.13 | backward_allreduce: 6353.88 | step: 7.91
|
100%|██████████| 4844/4844 [20:08:28<00:00, 16.02s/it]
|
{'loss': 0.6845, 'learning_rate': 0.0, 'epoch': 4.0}
|
100%|██████████| 4844/4844 [20:08:28<00:00, 16.02s/it][INFO|trainer.py:2946] 2024-10-24 10:49:46,448 >> Saving model checkpoint to work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844
|
[INFO|configuration_utils.py:473] 2024-10-24 10:49:46,449 >> Configuration saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/config.json
|
[INFO|configuration_utils.py:594] 2024-10-24 10:49:46,449 >> Configuration saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/generation_config.json
|
[INFO|modeling_utils.py:2493] 2024-10-24 10:49:51,709 >> Model weights saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/model.safetensors
|
[INFO|tokenization_utils_base.py:2433] 2024-10-24 10:49:51,711 >> tokenizer config file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/tokenizer_config.json
|
[INFO|tokenization_utils_base.py:2442] 2024-10-24 10:49:51,711 >> Special tokens file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/special_tokens_map.json
|
[INFO|tokenization_utils_base.py:2493] 2024-10-24 10:49:51,711 >> added tokens file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/added_tokens.json
|
[2024-10-24 10:49:51,949] [INFO] [logging.py:96:log_dist] [Rank 0] [Torch] Checkpoint global_step4844 is about to be saved!
|
[2024-10-24 10:49:51,962] [INFO] [logging.py:96:log_dist] [Rank 0] Saving model checkpoint: work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/mp_rank_00_model_states.pt
|
[2024-10-24 10:49:51,962] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/mp_rank_00_model_states.pt...
|
[2024-10-24 10:49:56,036] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/mp_rank_00_model_states.pt.
|
[2024-10-24 10:49:56,038] [INFO] [torch_checkpoint_engine.py:21:save] [Torch] Saving work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt...
|
[2024-10-24 10:49:56,078] [INFO] [torch_checkpoint_engine.py:23:save] [Torch] Saved work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt.
|
[2024-10-24 10:49:56,078] [INFO] [engine.py:3285:_save_zero_checkpoint] zero checkpoint saved work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tmp-checkpoint-4844/global_step4844/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt
|
[2024-10-24 10:49:56,078] [INFO] [torch_checkpoint_engine.py:33:commit] [Torch] Checkpoint global_step4844 is ready now!
|
[INFO|trainer.py:1963] 2024-10-24 10:49:56,102 >>
|
Training completed. Do not forget to share your model on huggingface.co/models =)
|
{'train_runtime': 72519.9066, 'train_samples_per_second': 0.534, 'train_steps_per_second': 0.067, 'train_loss': 0.6933541081347414, 'epoch': 4.0}
|
100%|██████████| 4844/4844 [20:08:39<00:00, 16.02s/it]
|
100%|██████████| 4844/4844 [20:08:39<00:00, 14.97s/it]
|
[INFO|trainer.py:2946] 2024-10-24 10:49:57,719 >> Saving model checkpoint to work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora
|
[INFO|configuration_utils.py:473] 2024-10-24 10:49:57,720 >> Configuration saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/config.json
|
[INFO|configuration_utils.py:594] 2024-10-24 10:49:57,720 >> Configuration saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/generation_config.json
|
[INFO|modeling_utils.py:2493] 2024-10-24 10:50:02,937 >> Model weights saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/model.safetensors
|
[INFO|tokenization_utils_base.py:2433] 2024-10-24 10:50:02,939 >> tokenizer config file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/tokenizer_config.json
|
[INFO|tokenization_utils_base.py:2442] 2024-10-24 10:50:02,939 >> Special tokens file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/special_tokens_map.json
|
[INFO|tokenization_utils_base.py:2493] 2024-10-24 10:50:02,939 >> added tokens file saved in work_dirs/internvl_chat_v1_5/internvl_chat_v1_5_internlm2_1_8b_dynamic_res_2nd_finetune_lora/added_tokens.json
|
***** train metrics *****
|
epoch = 4.0
|
train_loss = 0.6934
|
train_runtime = 20:08:39.90
|
train_samples = 9681
|
train_samples_per_second = 0.534
|
train_steps_per_second = 0.067
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.