no *************
['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock'] tensor([1.0000e+00, 1.9363e-09, 6.8928e-07, 7.3184e-12, 1.2366e-11, 1.5042e-09,
9.6770e-11, 2.2148e-07], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(6.9506e-09, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many chow dogs are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
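(The three-line traces above all follow the same VisProg-style shape: a VQA call on one image, an EVAL that compares the answer against a constant, and a RESULT that returns the evaluated variable. Below is a minimal interpreter sketch for that trace format; only the step names and line syntax come from this log, while the registry and parsing helpers are assumptions, echoing the "Registering VQA_lavis step" lines further down.)

import re

STEP_REGISTRY = {}  # hypothetical: steps appear to be registered by name before execution

def register(name):
    def wrap(fn):
        STEP_REGISTRY[name] = fn
        return fn
    return wrap

@register('EVAL')
def eval_step(args, env):
    # args look like: expr='{ANSWER0} == 2'
    expr = re.match(r"expr='(.*)'", args).group(1)
    try:
        return eval(expr.format(**env))  # substitute earlier answers, then evaluate
    except Exception:
        return 'Execute Error'

@register('RESULT')
def result_step(args, env):
    var = re.match(r"var=(\w+)", args).group(1)
    return env[var]

def run_program(lines, env):
    """Execute lines like ANSWER0=VQA(image=RIGHT,question='...') in order."""
    for line in lines:
        var, step, args = re.match(r"(\w+)=(\w+)\((.*)\)", line).groups()
        env[var] = STEP_REGISTRY[step](args, env)
    return env['FINAL_ANSWER']

# With the VQA answer stubbed to '1', the trace above reduces to:
print(run_program(["ANSWER1=EVAL(expr='{ANSWER0} == 2')",
                   "FINAL_ANSWER=RESULT(var=ANSWER1)"],
                  env={'ANSWER0': '1'}))  # -> False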
torch.Size([1, 3, 448, 448])
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.9363e-09, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(9.5367e-07, device='cuda:3', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many shoes are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
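(The dynamic token lengths are consistent with a fixed visual-token budget per 448x448 tile plus the text prompt: assuming 256 visual tokens per tile, typical of InternVL-style dynamic tiling but not stated in this log, the 13-tile samples give 13 x 256 = 3328 visual tokens and 3396 - 3328 = 68 prompt tokens, and the 7-tile samples further down give 7 x 256 = 1792 plus the 67-69 prompt tokens that account for their 1859-1861 totals.)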
torch.Size([3, 3, 448, 448])
question: ['How many chow dogs are in the image?'], responses:['1']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
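(Each VQA call yields an 8-way candidate list. The near-uniform weights printed with the candidates, all close to 1/8 = 0.125, look like a smoothed generation-time distribution, while the sharp SoftmaxBackward0 tensors below are consistent with a separate re-scoring pass. A sketch of such a pass; the log-probability values here are illustrative assumptions, not taken from the log:)

import torch

candidates = ['1', '3', '4', '8', '6', '12', '2', '47']
logprobs = torch.tensor([0.0, -21.0, -22.2, -21.5, -21.8, -17.9, -18.4, -21.7])
probs = torch.softmax(logprobs, dim=0)   # argmax recovers '1', as printed below
print(list(zip(candidates, probs.tolist())))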
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
question: ['How many shoes are in the image?'], responses:['1']
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
tensor([1.0000e+00, 7.1580e-10, 2.3126e-10, 4.3711e-10, 3.2862e-10, 1.7500e-08,
1.7806e-08, 3.4927e-10], device='cuda:2', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 7.1580e-10, 2.3126e-10, 4.3711e-10, 3.2862e-10, 1.7500e-08,
1.7806e-08, 3.4927e-10], device='cuda:2', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.7806e-08, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
tensor([1.0000e+00, 4.3634e-09, 5.3768e-10, 5.4190e-10, 4.0268e-10, 8.2412e-08,
2.5907e-08, 7.5977e-10], device='cuda:3', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 4.3634e-09, 5.3768e-10, 5.4190e-10, 4.0268e-10, 8.2412e-08,
2.5907e-08, 7.5977e-10], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.1492e-07, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(5.9605e-08, device='cuda:3', grad_fn=<DivBackward0>)}
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
tensor([1.0000e+00, 1.5834e-09, 3.0636e-10, 4.2196e-10, 2.0075e-10, 1.7248e-08,
9.2374e-09, 4.3843e-10], device='cuda:0', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 1.5834e-09, 3.0636e-10, 4.2196e-10, 2.0075e-10, 1.7248e-08,
9.2374e-09, 4.3843e-10], device='cuda:0', grad_fn=<SelectBackward0>)
tensor([1.0000e+00, 2.0503e-08, 2.2518e-11, 2.3206e-08, 3.8295e-10, 5.2935e-10,
1.3403e-11, 2.8774e-08], device='cuda:1', grad_fn=<SoftmaxBackward0>)
yes *************
['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate'] tensor([1.0000e+00, 2.0503e-08, 2.2518e-11, 2.3206e-08, 3.8295e-10, 5.2935e-10,
1.3403e-11, 2.8774e-08], device='cuda:1', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(9.2374e-09, device='cuda:0', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:0', grad_fn=<DivBackward0>)}
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(2.2518e-11, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(1.1919e-07, device='cuda:1', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='Which direction is the animal facing?')
ANSWER1=EVAL(expr='{ANSWER0} == "left"')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=LEFT,question='How many dogs are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([5, 3, 448, 448])
torch.Size([7, 3, 448, 448])
question: ['How many dogs are in the image?'], responses:['1']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
torch.Size([5, 3, 448, 448]) knan debug pixel values shape
question: ['Which direction is the animal facing?'], responses:['right']
[('right', 0.12743553739412528), ('right 1', 0.12490968573275477), ('straight', 0.12485251094891832), ('floating', 0.12468075392646753), ('flip', 0.12467791878738273), ('backwards', 0.12452118816110067), ('serious', 0.12447626064603681), ('working', 0.12444614440321403)]
[['right', 'right 1', 'straight', 'floating', 'flip', 'backwards', 'serious', 'working']]
torch.Size([7, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1859
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1861
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1860
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1860
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1859
tensor([1.0000e+00, 9.1271e-11, 1.1514e-11, 6.8894e-11, 4.0815e-11, 7.6554e-09,
9.8332e-09, 6.2152e-11], device='cuda:1', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 9.1271e-11, 1.1514e-11, 6.8894e-11, 4.0815e-11, 7.6554e-09,
9.8332e-09, 6.2152e-11], device='cuda:1', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(9.8332e-09, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1860
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1860
dynamic ViT batch size: 7, images per sample: 7.0, dynamic token length: 1859
tensor([9.9597e-01, 7.2802e-06, 1.5822e-04, 4.4234e-07, 4.1026e-08, 3.7410e-03,
1.2150e-04, 3.9999e-08], device='cuda:0', grad_fn=<SoftmaxBackward0>)
right *************
['right', 'right 1', 'straight', 'floating', 'flip', 'backwards', 'serious', 'working'] tensor([9.9597e-01, 7.2802e-06, 1.5822e-04, 4.4234e-07, 4.1026e-08, 3.7410e-03,
1.2150e-04, 3.9999e-08], device='cuda:0', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(0., device='cuda:0', grad_fn=<MulBackward0>), False: tensor(0., device='cuda:0', grad_fn=<MulBackward0>), 'Execute Error': tensor(1., device='cuda:0', grad_fn=<DivBackward0>)}
[2024-10-24 10:35:32,779] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.45 | optimizer_gradients: 0.27 | optimizer_step: 0.32
[2024-10-24 10:35:32,780] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 7032.75 | backward_microstep: 6705.38 | backward_inner_microstep: 6699.30 | backward_allreduce_microstep: 5.90 | step_microstep: 7.39
[2024-10-24 10:35:32,780] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 7032.76 | backward: 6705.38 | backward_inner: 6699.34 | backward_allreduce: 5.88 | step: 7.40
99%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–‰| 4786/4844 [19:54:16<14:43, 15.24s/it]
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
ANSWER0=VQA(image=RIGHT,question='Are there people in a shop in the image?')
ANSWER1=EVAL(expr='{ANSWER0}')