dynamic ViT batch size: 1, images per sample: 1.0, dynamic token length: 330
[('3', 0.12809209985493852), ('4', 0.12520382509374006), ('1', 0.1251059160028928), ('5', 0.12483070991268265), ('8', 0.12458076282181878), ('2', 0.12413212281858195), ('6', 0.1241125313968017), ('12', 0.12394203209854344)]
[['3', '4', '1', '5', '8', '2', '6', '12']]
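The candidate lists above come from a top-k over the answer-token probabilities. A minimal sketch of that step, assuming a logits tensor over the vocabulary and a tokenizer that can decode single token ids (both names are placeholders, not taken from this log):

import torch

def topk_candidates(logits, tokenizer, k=8):
    # Softmax over the vocabulary, then keep the k most likely answer tokens.
    probs = torch.softmax(logits, dim=-1)
    top_p, top_ids = probs.topk(k, dim=-1)
    # Decode each token id back to text and pair it with its probability,
    # reproducing the [('3', 0.128...), ...] lines printed in the log.
    tokens = [tokenizer.decode([i]) for i in top_ids.tolist()]
    return list(zip(tokens, top_p.tolist()))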
dynamic ViT batch size: 1, images per sample: 1.0, dynamic token length: 330
tensor([9.9837e-01, 4.4879e-06, 1.5978e-03, 8.9252e-06, 7.2540e-08, 1.3824e-05,
3.2834e-06, 4.2134e-10], device='cuda:0', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([9.9837e-01, 4.4879e-06, 1.5978e-03, 8.9252e-06, 7.2540e-08, 1.3824e-05,
3.2834e-06, 4.2134e-10], device='cuda:0', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(8.9252e-06, device='cuda:0', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(5.9605e-08, device='cuda:0', grad_fn=<DivBackward0>)}
torch.Size([5, 3, 448, 448]) knan debug pixel values shape
ANSWER0=VQA(image=RIGHT,question='How many dogs are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
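Each generated program follows the same three-step template: a VQA call on one of the paired images, an EVAL over the returned answer, and a RESULT naming the final variable. A minimal sketch of a line-by-line executor for this format, assuming a vqa callable and an images dict keyed by 'LEFT'/'RIGHT' (both are placeholders; the actual executor is not shown in this log):

import re

def run_program(program_text, images, vqa):
    # Execute a tiny VQA/EVAL/RESULT program line by line.
    env = {}
    for line in program_text.strip().splitlines():
        var, call = line.split('=', 1)
        name, args = re.match(r'(\w+)\((.*)\)', call).groups()
        kwargs = dict(kv.split('=', 1) for kv in args.split(','))
        if name == 'VQA':
            env[var] = vqa(images[kwargs['image']], kwargs['question'].strip("'"))
        elif name == 'EVAL':
            # Substitute earlier answers, e.g. '{ANSWER0} == 1' -> '1 == 1'.
            env[var] = eval(kwargs['expr'].strip("'").format(**env))
        elif name == 'RESULT':
            return env[kwargs['var']]
    return env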
torch.Size([5, 3, 448, 448])
question: ['How many gold safety pins are in the image?'], responses:['1']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
torch.Size([7, 3, 448, 448]) knan debug pixel values shape
question: ['How many dogs are in the image?'], responses:['1']
tensor([1.0000e+00, 9.9099e-09, 4.0126e-08, 4.8353e-08, 4.3623e-09, 1.3730e-09,
4.0984e-10, 2.5768e-08], device='cuda:3', grad_fn=<SoftmaxBackward0>)
yes *************
['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate'] tensor([1.0000e+00, 9.9099e-09, 4.0126e-08, 4.8353e-08, 4.3623e-09, 1.3730e-09,
4.0984e-10, 2.5768e-08], device='cuda:3', grad_fn=<SelectBackward0>)
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(4.0126e-08, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(7.9084e-08, device='cuda:3', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=LEFT,question='How many cylindrical pencil cases are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([1, 3, 448, 448])
torch.Size([5, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
question: ['How many cylindrical pencil cases are in the image?'], responses:['3']
[('3', 0.12809209985493852), ('4', 0.12520382509374006), ('1', 0.1251059160028928), ('5', 0.12483070991268265), ('8', 0.12458076282181878), ('2', 0.12413212281858195), ('6', 0.1241125313968017), ('12', 0.12394203209854344)]
[['3', '4', '1', '5', '8', '2', '6', '12']]
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
tensor([9.9944e-01, 6.3357e-06, 3.2550e-07, 9.9821e-09, 1.5888e-10, 5.5288e-04,
1.0199e-09, 4.3438e-09], device='cuda:3', grad_fn=<SoftmaxBackward0>)
3 *************
['3', '4', '1', '5', '8', '2', '6', '12'] tensor([9.9944e-01, 6.3357e-06, 3.2550e-07, 9.9821e-09, 1.5888e-10, 5.5288e-04,
1.0199e-09, 4.3438e-09], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(3.2550e-07, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(1.1921e-07, device='cuda:3', grad_fn=<DivBackward0>)}
tensor([1.0000e+00, 9.7252e-07, 9.7245e-07, 4.4517e-09, 3.9974e-10, 6.6840e-07,
2.6474e-09, 5.8384e-08], device='cuda:1', grad_fn=<SoftmaxBackward0>)
3 *************
['3', '4', '1', '5', '8', '2', '6', '12'] tensor([1.0000e+00, 9.7252e-07, 9.7245e-07, 4.4517e-09, 3.9974e-10, 6.6840e-07,
2.6474e-09, 5.8384e-08], device='cuda:1', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(9.7245e-07, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=LEFT,question='How many dogs are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} <= 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
torch.Size([3, 3, 448, 448])
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
question: ['How many dogs are in the image?'], responses:['2']
[('2', 0.12961991198727602), ('3', 0.12561270547489775), ('4', 0.12556127085987287), ('1', 0.1254920833223361), ('5', 0.12407835939022728), ('8', 0.124024076973589), ('7', 0.12288810153923228), ('29', 0.12272349045256851)]
[['2', '3', '4', '1', '5', '8', '7', '29']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 5, images per sample: 5.0, dynamic token length: 1348
tensor([1.0000e+00, 1.2308e-09, 3.2612e-10, 8.1977e-10, 1.1204e-09, 1.4406e-07,
2.1024e-07, 3.9136e-09], device='cuda:2', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 1.2308e-09, 3.2612e-10, 8.1977e-10, 1.1204e-09, 1.4406e-07,
2.1024e-07, 3.9136e-09], device='cuda:2', grad_fn=<SelectBackward0>)
tensor([1.0000e+00, 8.4921e-10, 2.6617e-10, 4.6690e-10, 2.5002e-10, 3.8637e-08,
6.1056e-09, 4.2860e-10], device='cuda:0', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 8.4921e-10, 2.6617e-10, 4.6690e-10, 2.5002e-10, 3.8637e-08,
6.1056e-09, 4.2860e-10], device='cuda:0', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.0000, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(3.6171e-07, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1., device='cuda:0', grad_fn=<DivBackward0>), False: tensor(4.7004e-08, device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:0', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many hamsters are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} >= 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([13, 3, 448, 448])
tensor([1.0000e+00, 2.0176e-08, 2.0451e-09, 6.6156e-08, 6.3222e-11, 1.5646e-10,
2.2721e-10, 7.8039e-11], device='cuda:1', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([1.0000e+00, 2.0176e-08, 2.0451e-09, 6.6156e-08, 6.3222e-11, 1.5646e-10,
2.2721e-10, 7.8039e-11], device='cuda:1', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(6.6156e-08, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(5.9605e-08, device='cuda:1', grad_fn=<DivBackward0>)}
question: ['How many hamsters are in the image?'], responses:['1']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
tensor([1.0000e+00, 5.0511e-10, 1.1448e-10, 4.0257e-10, 2.5989e-10, 3.2621e-08,
1.4533e-08, 6.9189e-10], device='cuda:2', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 5.0511e-10, 1.1448e-10, 4.0257e-10, 2.5989e-10, 3.2621e-08,
1.4533e-08, 6.9189e-10], device='cuda:2', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(4.9128e-08, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
[2024-10-24 10:29:42,702] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.35 | optimizer_gradients: 0.30 | optimizer_step: 0.32
[2024-10-24 10:29:42,703] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 2546.29 | backward_microstep: 11184.51 | backward_inner_microstep: 2399.10 | backward_allreduce_microstep: 8785.30 | step_microstep: 7.53
[2024-10-24 10:29:42,703] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 2546.29 | backward: 11184.50 | backward_inner: 2399.12 | backward_allreduce: 8785.30 | step: 7.55