[['2', '3', '4', '1', '5', '8', '7', '29']]
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
tensor([1.0000e+00, 9.3295e-08, 6.1535e-09, 4.4069e-08, 2.9233e-10, 4.5971e-10,
8.3108e-10, 1.9422e-10], device='cuda:2', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([1.0000e+00, 9.3295e-08, 6.1535e-09, 4.4069e-08, 2.9233e-10, 4.5971e-10,
8.3108e-10, 1.9422e-10], device='cuda:2', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(4.4069e-08, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
tensor([1.0000e+00, 8.4589e-10, 4.5921e-07, 4.0586e-10, 1.7655e-10, 1.7586e-07,
1.1680e-08, 4.3026e-07], device='cuda:3', grad_fn=<SoftmaxBackward0>)
no *************
['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock'] tensor([1.0000e+00, 8.4589e-10, 4.5921e-07, 4.0586e-10, 1.7655e-10, 1.7586e-07,
1.1680e-08, 4.3026e-07], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(8.4589e-10, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(1.0729e-06, device='cuda:3', grad_fn=<DivBackward0>)}
tensor([1.0000e+00, 8.0594e-07, 2.5907e-08, 1.2626e-08, 1.1698e-09, 1.1929e-09,
1.9363e-09, 3.7895e-10], device='cuda:1', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([1.0000e+00, 8.0594e-07, 2.5907e-08, 1.2626e-08, 1.1698e-09, 1.1929e-09,
1.9363e-09, 3.7895e-10], device='cuda:1', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(8.4916e-07, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
[2024-10-24 09:59:21,203] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.43 | optimizer_gradients: 0.30 | optimizer_step: 0.33
[2024-10-24 09:59:21,203] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 5103.44 | backward_microstep: 12614.94 | backward_inner_microstep: 4846.94 | backward_allreduce_microstep: 7767.90 | step_microstep: 7.74
[2024-10-24 09:59:21,203] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 5103.46 | backward: 12614.93 | backward_inner: 4846.99 | backward_allreduce: 7767.88 | step: 7.76
96%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–Œ| 4641/4844 [19:18:05<55:20, 16.36s/it]
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering EVAL step
Registering RESULT step
ANSWER0=VQA(image=RIGHT,question='How many birds are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=RIGHT,question='How many wolves are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=LEFT,question='How many paper rolls are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} >= 5')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=LEFT,question='Is there a barber pole in the image?')
ANSWER1=EVAL(expr='{ANSWER0}')
FINAL_ANSWER=RESULT(var=ANSWER1)
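The 'Registering ... step' lines suggest VQA, EVAL, and RESULT are registered as interpreter steps, and the three-line programs above are then executed against the LEFT/RIGHT images. A minimal sketch of such an interpreter, assuming a `run_vqa(image, question)` model call (all names here are hypothetical, not the repo's actual API):

```python
import re

STEP_RE = re.compile(r"(\w+)=(\w+)\((.*)\)")

def execute_program(program, images, run_vqa):
    # Interpret lines such as:
    #   ANSWER0=VQA(image=RIGHT,question='How many birds are in the image?')
    #   ANSWER1=EVAL(expr='{ANSWER0} == 2')
    #   FINAL_ANSWER=RESULT(var=ANSWER1)
    # `images` maps 'LEFT'/'RIGHT' to pixel tensors; `run_vqa` is the
    # underlying VQA model call (hypothetical signature).
    env = {}
    for line in program.strip().splitlines():
        target, op, arg_str = STEP_RE.match(line.strip()).groups()
        args = {k: v.strip("'")
                for k, v in re.findall(r"(\w+)=('[^']*'|\w+)", arg_str)}
        if op == 'VQA':
            env[target] = run_vqa(images[args['image']], args['question'])
        elif op == 'EVAL':
            try:
                env[target] = eval(args['expr'].format(**env))
            except Exception:
                env[target] = 'Execute Error'
        elif op == 'RESULT':
            return env[args['var']]
```

This also reproduces the failure mode visible in the log: a VQA answer of 'five' substituted into '{ANSWER0} >= 5' evaluates as an undefined name and falls through to 'Execute Error'.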
torch.Size([1, 3, 448, 448])
torch.Size([1, 3, 448, 448])
torch.Size([13, 3, 448, 448])
torch.Size([13, 3, 448, 448])
question: ['How many paper rolls are in the image?'], responses:['five']
question: ['How many wolves are in the image?'], responses:['1']
[('7 eleven', 0.1264466744091217), ('babies', 0.124977990347662), ('sunrise', 0.12490143984830117), ('eating', 0.1247676656843781), ('feet', 0.12475702323703439), ('candle', 0.12473210928138137), ('light', 0.12472650705175181), ('floating', 0.12469059014036947)]
[['7 eleven', 'babies', 'sunrise', 'eating', 'feet', 'candle', 'light', 'floating']]
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
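The (answer, score) pairs above look like k generated candidates with normalized sequence scores. One way such pairs could be produced, assuming a Hugging Face-style generate() with beam search (the original run's decoding and scoring setup is not shown in the log):

```python
import torch

def topk_answers(model, tokenizer, inputs, k=8):
    # Return k (answer, probability) pairs like the log lines above.
    # Sketch only: assumes HF beam search with per-sequence scores.
    out = model.generate(**inputs,
                         num_beams=k, num_return_sequences=k,
                         output_scores=True, return_dict_in_generate=True)
    answers = tokenizer.batch_decode(out.sequences, skip_special_tokens=True)
    probs = torch.softmax(out.sequences_scores, dim=0)  # normalize over the k beams
    return list(zip(answers, probs.tolist()))
```

Softmax over k similar sequence scores yields the near-uniform ~0.125 values seen above; the downstream re-scoring (the SoftmaxBackward0 tensors) then sharpens the distribution.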
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
tensor([2.9175e-06, 4.8725e-01, 8.1371e-02, 1.8758e-04, 4.3029e-01, 6.1331e-04,
1.2496e-04, 1.6100e-04], device='cuda:1', grad_fn=<SoftmaxBackward0>)
babies *************
['7 eleven', 'babies', 'sunrise', 'eating', 'feet', 'candle', 'light', 'floating'] tensor([2.9175e-06, 4.8725e-01, 8.1371e-02, 1.8758e-04, 4.3029e-01, 6.1331e-04,
1.2496e-04, 1.6100e-04], device='cuda:1', grad_fn=<SelectBackward0>)
tensor([1.0000e+00, 2.1555e-10, 5.1600e-11, 1.3809e-10, 7.3334e-11, 1.1126e-08,
7.4225e-09, 6.8703e-11], device='cuda:3', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 2.1555e-10, 5.1600e-11, 1.3809e-10, 7.3334e-11, 1.1126e-08,
7.4225e-09, 6.8703e-11], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(0., device='cuda:1', grad_fn=<MulBackward0>), False: tensor(0., device='cuda:1', grad_fn=<MulBackward0>), 'Execute Error': tensor(1., device='cuda:1', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many steeples are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(7.4225e-09, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1., device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:3', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='What color is the egg in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == "brown"')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([3, 3, 448, 448])
torch.Size([13, 3, 448, 448])
question: ['What color is the egg in the image?'], responses:['white']
[('white', 0.12741698904857263), ('black', 0.12562195821587463), ('purple', 0.12482758531934457), ('orange', 0.12467593918870701), ('maroon', 0.12456097552653009), ('color', 0.12448461429606533), ('brown', 0.12421598902969112), ('dark', 0.12419594937521464)]
[['white', 'black', 'purple', 'orange', 'maroon', 'color', 'brown', 'dark']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
question: ['How many birds are in the image?'], responses:['1']
question: ['Is there a barber pole in the image?'], responses:['no']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
[('no', 0.1313955057270409), ('yes', 0.12592208734904367), ('no smoking', 0.12472972590078177), ('gone', 0.12376514658020793), ('man', 0.12367833016285167), ('meow', 0.1235796378467502), ('kia', 0.12347643720898455), ('no clock', 0.12345312922433942)]
[['no', 'yes', 'no smoking', 'gone', 'man', 'meow', 'kia', 'no clock']]
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3396
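The recurring torch.Size([13, 3, 448, 448]) shapes together with 'dynamic ViT batch size: 13' suggest each high-resolution image is split into 448x448 tiles plus one global thumbnail before entering the ViT. A minimal sketch under that assumption (the fixed 3x4 grid is hypothetical; the real preprocessing presumably chooses the grid from the image's aspect ratio):

```python
import torch
import torchvision.transforms.functional as TF

def tile_image(img, tile=448, grid=(3, 4)):
    # img: float tensor [3, H, W]. Resize onto a rows x cols grid of
    # tile x tile crops, then append a global thumbnail, yielding
    # [rows*cols + 1, 3, 448, 448] -- e.g. torch.Size([13, 3, 448, 448]).
    rows, cols = grid
    resized = TF.resize(img, [rows * tile, cols * tile])
    tiles = (resized.unfold(1, tile, tile)      # [3, rows, cols*tile, tile]
                    .unfold(2, tile, tile)      # [3, rows, cols, tile, tile]
                    .permute(1, 2, 0, 3, 4)     # [rows, cols, 3, tile, tile]
                    .reshape(-1, 3, tile, tile))
    thumb = TF.resize(img, [tile, tile]).unsqueeze(0)  # global view
    return torch.cat([tiles, thumb], dim=0)
```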
tensor([9.7848e-01, 1.1899e-03, 2.3276e-04, 1.4137e-03, 2.1081e-03, 1.2175e-06,
1.6556e-02, 1.3090e-05], device='cuda:3', grad_fn=<SoftmaxBackward0>)
white *************
['white', 'black', 'purple', 'orange', 'maroon', 'color', 'brown', 'dark'] tensor([9.7848e-01, 1.1899e-03, 2.3276e-04, 1.4137e-03, 2.1081e-03, 1.2175e-06,
1.6556e-02, 1.3090e-05], device='cuda:3', grad_fn=<SelectBackward0>)
ζœ€εŽηš„ζ¦‚ηŽ‡εˆ†εΈƒδΈΊ: {True: tensor(0., device='cuda:3', grad_fn=<MulBackward0>), False: tensor(0., device='cuda:3', grad_fn=<MulBackward0>), 'Execute Error': tensor(1., device='cuda:3', grad_fn=<DivBackward0>)}
question: ['How many steeples are in the image?'], responses:['1']