tensor([9.9959e-01, 4.0578e-04, 1.2469e-07, 7.5901e-07, 2.1310e-08, 1.8116e-08,
        1.4595e-07, 4.1838e-08], device='cuda:1', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([9.9959e-01, 4.0578e-04, 1.2469e-07, 7.5901e-07, 2.1310e-08, 1.8116e-08,
        1.4595e-07, 4.1838e-08], device='cuda:1', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.9996, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(0.0004, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
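The {True, False, 'Execute Error'} dictionaries in this log are apparently built by checking the EVAL expression against each candidate answer and summing the candidates' softmax probabilities into outcome buckets, with a final renormalization (hence the DivBackward0 grad_fns). A minimal sketch of that aggregation; the function below is illustrative, not the actual training code:

```python
import torch

def aggregate_outcomes(candidates, probs, expr):
    # candidates: answer strings, e.g. ['2', '3', '4', '1', '5', '8', '7', '29']
    # probs:      softmax probabilities over those candidates (1-D tensor)
    # expr:       EVAL expression template, e.g. '{ANSWER0} <= 1'
    dist = {True: probs.new_zeros(()), False: probs.new_zeros(()),
            'Execute Error': probs.new_zeros(())}
    for cand, p in zip(candidates, probs):
        try:
            key = bool(eval(expr.format(ANSWER0=cand)))
        except Exception:
            key = 'Execute Error'  # e.g. non-numeric candidates like 'yes'
        dist[key] = dist[key] + p
    total = sum(dist.values())
    return {k: v / total for k, v in dist.items()}  # renormalize (DivBackward0)

probs = torch.softmax(torch.randn(8, requires_grad=True), dim=0)  # stand-in for the logged tensor
print(aggregate_outcomes(['2', '3', '4', '1', '5', '8', '7', '29'], probs, '{ANSWER0} <= 1'))
```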
ANSWER0=VQA(image=RIGHT,question='How many shoes are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} <= 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
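Each program in this log is a short sequence of NAME=MODULE(arg=value, ...) steps. Parsing one such line could look like the following sketch (the real interpreter's parser is not shown in the log):

```python
import re

STEP_RE = re.compile(r"^(?P<var>\w+)=(?P<module>\w+)\((?P<args>.*)\)$")
ARG_RE = re.compile(r"(\w+)=('[^']*'|[^,]+)")

def parse_step(line):
    # Split a step into its output variable, module name, and keyword arguments.
    m = STEP_RE.match(line.strip())
    args = {k: v.strip("'") for k, v in ARG_RE.findall(m.group('args'))}
    return m.group('var'), m.group('module'), args

print(parse_step("ANSWER0=VQA(image=RIGHT,question='How many shoes are in the image?')"))
# -> ('ANSWER0', 'VQA', {'image': 'RIGHT', 'question': 'How many shoes are in the image?'})
```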
torch.Size([5, 3, 448, 448])
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
question: ['How many shoes are in the image?'], responses:['3']
[('3', 0.12809209985493852), ('4', 0.12520382509374006), ('1', 0.1251059160028928), ('5', 0.12483070991268265), ('8', 0.12458076282181878), ('2', 0.12413212281858195), ('6', 0.1241125313968017), ('12', 0.12394203209854344)]
[['3', '4', '1', '5', '8', '2', '6', '12']]
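The near-uniform (candidate, score) pairs above (all close to 1/8) look like a softmax taken over sequence-level scores of eight generated candidates whose raw scores are of similar magnitude. A sketch under that assumption; the score values are placeholders:

```python
import torch

# Assume seq_scores holds one sequence-level score per generated candidate
# (e.g. summed token log-probs from generation with multiple return sequences).
candidates = ['3', '4', '1', '5', '8', '2', '6', '12']
seq_scores = torch.tensor([-4.1, -4.3, -4.3, -4.4, -4.4, -4.4, -4.4, -4.4])

probs = torch.softmax(seq_scores, dim=0)  # near-uniform when scores are close
ranked = sorted(zip(candidates, probs.tolist()), key=lambda x: -x[1])
print(ranked)  # shape of the output resembles the logged [('3', 0.128...), ...]
```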
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3401
torch.Size([5, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3401
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
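The "dynamic ViT batch size: 13, images per sample: 13.0" lines are consistent with dynamic-resolution preprocessing in the InternVL style: each image is cut into up to 12 tiles of 448x448 chosen to match its aspect ratio, plus one thumbnail tile, and the tiles form the ViT batch. A rough sketch of the tile-count logic, with the grid-selection rule simplified relative to the real preprocessing code:

```python
def num_tiles(width, height, tile=448, max_tiles=12, use_thumbnail=True):
    # Pick the grid (cols x rows), capped at max_tiles, whose aspect ratio
    # best matches the input image; add one thumbnail tile when tiling occurs.
    aspect = width / height
    best = (1, 1)
    for cols in range(1, max_tiles + 1):
        for rows in range(1, max_tiles + 1):
            if cols * rows > max_tiles:
                continue
            if abs(cols / rows - aspect) < abs(best[0] / best[1] - aspect):
                best = (cols, rows)
    n = best[0] * best[1]
    return n + (1 if use_thumbnail and n > 1 else 0)

print(num_tiles(1600, 1200))  # 4x3 grid -> 12 tiles + thumbnail = 13, as in the log
```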
tensor([1.0000e+00, 1.6592e-10, 7.9131e-12, 1.7832e-11, 1.1695e-11, 2.8613e-09,
        1.2360e-07, 3.0243e-11], device='cuda:3', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([1.0000e+00, 1.6592e-10, 7.9131e-12, 1.7832e-11, 1.1695e-11, 2.8613e-09,
        1.2360e-07, 3.0243e-11], device='cuda:3', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(1.2360e-07, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:3', grad_fn=<DivBackward0>)}
dynamic ViT batch size: 13, images per sample: 13.0, dynamic token length: 3402
tensor([9.9951e-01, 3.6192e-06, 1.7991e-07, 6.6648e-09, 6.2406e-10, 4.8903e-04,
        9.0825e-09, 5.9243e-08], device='cuda:1', grad_fn=<SoftmaxBackward0>)
3 *************
['3', '4', '1', '5', '8', '2', '6', '12'] tensor([9.9951e-01, 3.6192e-06, 1.7991e-07, 6.6648e-09, 6.2406e-10, 4.8903e-04,
        9.0825e-09, 5.9243e-08], device='cuda:1', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(1.7991e-07, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
tensor([9.9996e-01, 3.5356e-05, 6.1440e-06, 1.6373e-07, 2.0176e-08, 2.5504e-08,
        6.8253e-08, 2.3125e-10], device='cuda:2', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([9.9996e-01, 3.5356e-05, 6.1440e-06, 1.6373e-07, 2.0176e-08, 2.5504e-08,
        6.8253e-08, 2.3125e-10], device='cuda:2', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(3.5356e-05, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(1.0000, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:2', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=LEFT,question='How many window shades are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([3, 3, 448, 448])
tensor([1.4063e-02, 6.2942e-09, 9.8594e-01, 7.5049e-09, 1.8682e-11, 1.3558e-12,
        1.6497e-10, 4.0575e-09], device='cuda:0', grad_fn=<SoftmaxBackward0>)
no *************
['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate'] tensor([1.4063e-02, 6.2942e-09, 9.8594e-01, 7.5049e-09, 1.8682e-11, 1.3558e-12,
        1.6497e-10, 4.0575e-09], device='cuda:0', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.0141, device='cuda:0', grad_fn=<DivBackward0>), False: tensor(0.9859, device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:0', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many animals are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 1')
FINAL_ANSWER=RESULT(var=ANSWER1)
torch.Size([3, 3, 448, 448])
question: ['How many window shades are in the image?'], responses:['2']
[('2', 0.12961991198727602), ('3', 0.12561270547489775), ('4', 0.12556127085987287), ('1', 0.1254920833223361), ('5', 0.12407835939022728), ('8', 0.124024076973589), ('7', 0.12288810153923228), ('29', 0.12272349045256851)]
[['2', '3', '4', '1', '5', '8', '7', '29']]
question: ['How many animals are in the image?'], responses:['7']
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
[('7', 0.12828776251745355), ('8', 0.1258361832781132), ('11', 0.12481772898325143), ('5', 0.124759881092759), ('9', 0.12447036165452931), ('10', 0.1239759375399529), ('6', 0.12393017600998846), ('12', 0.12392196892395223)]
[['7', '8', '11', '5', '9', '10', '6', '12']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 836
tensor([9.9996e-01, 3.3214e-05, 1.3709e-06, 1.2482e-06, 2.0816e-08, 3.0764e-08,
        8.7640e-08, 1.0733e-09], device='cuda:2', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([9.9996e-01, 3.3214e-05, 1.3709e-06, 1.2482e-06, 2.0816e-08, 3.0764e-08,
        8.7640e-08, 1.0733e-09], device='cuda:2', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(1.0000, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(3.5974e-05, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(-1.1921e-07, device='cuda:2', grad_fn=<DivBackward0>)}
tensor([9.9142e-01, 2.7572e-04, 6.2813e-03, 2.4880e-05, 1.3166e-03, 1.2235e-04,
        5.4775e-04, 1.1030e-05], device='cuda:0', grad_fn=<SoftmaxBackward0>)
7 *************
['7', '8', '11', '5', '9', '10', '6', '12'] tensor([9.9142e-01, 2.7572e-04, 6.2813e-03, 2.4880e-05, 1.3166e-03, 1.2235e-04,
        5.4775e-04, 1.1030e-05], device='cuda:0', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0., device='cuda:0', grad_fn=<MulBackward0>), False: tensor(1., device='cuda:0', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:0', grad_fn=<DivBackward0>)}
[2024-10-24 10:37:47,212] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.39 | optimizer_gradients: 0.24 | optimizer_step: 0.34
[2024-10-24 10:37:47,212] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 5816.14 | backward_microstep: 5541.77 | backward_inner_microstep: 5536.23 | backward_allreduce_microstep: 5.45 | step_microstep: 8.25
[2024-10-24 10:37:47,213] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 5816.14 | backward: 5541.76 | backward_inner: 5536.25 | backward_allreduce: 5.44 | step: 8.26
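The per-step timing lines above (forward/backward/step in ms) are DeepSpeed's wall-clock breakdown, enabled via the wall_clock_breakdown key in the DeepSpeed config; the optimizer_allgather timer points to ZeRO stage 1 or 2. A minimal config sketch with placeholder values:

```python
import deepspeed

ds_config = {
    "train_micro_batch_size_per_gpu": 1,   # placeholder values
    "gradient_accumulation_steps": 1,
    "wall_clock_breakdown": True,          # emits the per-step timing lines above
    "zero_optimization": {"stage": 1},     # optimizer_allgather implies ZeRO 1/2
}
# engine, optimizer, _, _ = deepspeed.initialize(
#     model=model, model_parameters=model.parameters(), config=ds_config)
```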
99%|██████████| 4795/4844 [19:56:31<10:53, 13.35s/it]
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
ANSWER0=VQA(image=RIGHT,question='Are there several puppies nestled on straw?')
ANSWER1=EVAL(expr='{ANSWER0}')
FINAL_ANSWER=RESULT(var=ANSWER1)
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
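The "Registering ... step" messages suggest a VisProg-style registry that maps module names (VQA_lavis, EVAL, RESULT) to step implementations before each program runs. A minimal sketch of such a registry; the class layout is an assumption, only the names come from the log:

```python
STEP_REGISTRY = {}

def register_step(name):
    # Decorator: expose a step implementation to the program interpreter.
    def wrap(cls):
        print(f"Registering {name} step")
        STEP_REGISTRY[name] = cls
        return cls
    return wrap

@register_step("VQA_lavis")
class VQAStep:
    def execute(self, image, question):
        ...  # query the VQA model, return an answer distribution

@register_step("EVAL")
class EvalStep:
    def execute(self, expr):
        ...  # substitute earlier ANSWERn values into expr and evaluate

@register_step("RESULT")
class ResultStep:
    def execute(self, var):
        ...  # return the named variable as FINAL_ANSWER
```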
torch.Size([1, 3, 448, 448])
ANSWER0=VQA(image=LEFT,question='Are both beavers standing up on their hind legs?')
ANSWER1=EVAL(expr='{ANSWER0}')