tensor([8.9376e-01, 1.5985e-02, 8.8551e-02, 1.0858e-03, 7.0880e-05, 2.1915e-04, 1.8784e-05, 3.1332e-04], device='cuda:2', grad_fn=<SoftmaxBackward0>)
yes *************
['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate'] tensor([8.9376e-01, 1.5985e-02, 8.8551e-02, 1.0858e-03, 7.0880e-05, 2.1915e-04, 1.8784e-05, 3.1332e-04], device='cuda:2', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.8938, device='cuda:2', grad_fn=<DivBackward0>), False: tensor(0.0886, device='cuda:2', grad_fn=<DivBackward0>), 'Execute Error': tensor(0.0177, device='cuda:2', grad_fn=<DivBackward0>)}
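In this first sample the mapping appears direct: True takes the probability of 'yes' (0.8938), False takes the probability of 'no' (0.0886), and the remaining off-topic candidates ('congratulations', 'honey', 'solid', 'right', 'candle', 'chocolate') together account for the 'Execute Error' mass, 0.0160 + 0.0011 + 0.0001 + 0.0002 + 0.0000 + 0.0003 ≈ 0.0177.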
question: ['How many dogs are in the image?'], responses:['2']
[('2', 0.12961991198727602), ('3', 0.12561270547489775), ('4', 0.12556127085987287), ('1', 0.1254920833223361), ('5', 0.12407835939022728), ('8', 0.124024076973589), ('7', 0.12288810153923228), ('29', 0.12272349045256851)]
[['2', '3', '4', '1', '5', '8', '7', '29']]
torch.Size([13, 3, 448, 448]) knan debug pixel values shape
tensor([6.5949e-01, 9.4542e-02, 3.2833e-02, 1.8920e-01, 1.4498e-02, 4.4420e-03, 4.7291e-03, 2.7111e-04], device='cuda:1', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([6.5949e-01, 9.4542e-02, 3.2833e-02, 1.8920e-01, 1.4498e-02, 4.4420e-03, 4.7291e-03, 2.7111e-04], device='cuda:1', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.6595, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(0.3405, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many cats are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
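A minimal sketch of how program lines in this format could be parsed into (variable, operation, kwargs) steps; the regexes and function name below are hypothetical illustrations, not the actual implementation:

```python
import re

# Hypothetical parser for lines like:
#   ANSWER0=VQA(image=RIGHT,question='How many cats are in the image?')
STEP_RE = re.compile(r"^(?P<var>\w+)=(?P<op>\w+)\((?P<args>.*)\)$")
ARG_RE = re.compile(r"(\w+)=('[^']*'|[^,]+)")

def parse_program(program: str):
    steps = []
    for line in program.strip().splitlines():
        m = STEP_RE.match(line.strip())
        if not m:
            continue
        kwargs = {k: v.strip("'") for k, v in ARG_RE.findall(m.group("args"))}
        steps.append({"var": m.group("var"), "op": m.group("op"), "kwargs": kwargs})
    return steps

program = """ANSWER0=VQA(image=RIGHT,question='How many cats are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} == 2')
FINAL_ANSWER=RESULT(var=ANSWER1)"""
print(parse_program(program))
```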
torch.Size([3, 3, 448, 448])
question: ['How many cats are in the image?'], responses:['1']
[('1', 0.12829009354978346), ('3', 0.12529928082343206), ('4', 0.12464806219229535), ('8', 0.12460015878893425), ('6', 0.12451220062887247), ('12', 0.124338487048427), ('2', 0.12420459433498025), ('47', 0.12410712263327517)]
[['1', '3', '4', '8', '6', '12', '2', '47']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
tensor([7.6697e-01, 5.5561e-02, 2.1087e-02, 7.2779e-03, 1.0275e-02, 5.1336e-03, 1.3328e-01, 4.1043e-04], device='cuda:1', grad_fn=<SoftmaxBackward0>)
1 *************
['1', '3', '4', '8', '6', '12', '2', '47'] tensor([7.6697e-01, 5.5561e-02, 2.1087e-02, 7.2779e-03, 1.0275e-02, 5.1336e-03, 1.3328e-01, 4.1043e-04], device='cuda:1', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.1333, device='cuda:1', grad_fn=<DivBackward0>), False: tensor(0.8667, device='cuda:1', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:1', grad_fn=<DivBackward0>)}
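The per-candidate softmax, the selection of the generated answer's probability, and the collapse into {True, False, 'Execute Error'} logged above are consistent with the following sketch. It is only an illustration under stated assumptions (candidate scoring, the helper name, and the renormalization details are hypothetical), not the training code itself:

```python
import torch

def outcome_distribution(candidates, scores, expr):
    """Collapse per-candidate probabilities into {True, False, 'Execute Error'}.

    candidates: e.g. ['1', '3', '4', '8', '6', '12', '2', '47']
    scores:     assumed unnormalized per-candidate scores from the VQA model
    expr:       the EVAL expression with the answer slot, e.g. '{ANSWER0} == 2'
    """
    probs = torch.softmax(scores, dim=-1)          # SoftmaxBackward0 in the log
    dist = {True: probs.new_zeros(()), False: probs.new_zeros(()),
            'Execute Error': probs.new_zeros(())}
    for ans, p in zip(candidates, probs):          # each p keeps its grad_fn
        try:
            outcome = bool(eval(expr.format(ANSWER0=ans)))
        except Exception:
            outcome = 'Execute Error'
        dist[outcome] = dist[outcome] + p
    return dist

# Reproduces the cat-counting example above: P('2') = 0.1333 flows to True,
# everything else flows to False, and nothing fails to evaluate.
cands = ['1', '3', '4', '8', '6', '12', '2', '47']
probs_logged = torch.tensor([7.6697e-01, 5.5561e-02, 2.1087e-02, 7.2779e-03,
                             1.0275e-02, 5.1336e-03, 1.3328e-01, 4.1043e-04])
print(outcome_distribution(cands, probs_logged.log(), '{ANSWER0} == 2'))
```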
tensor([9.1325e-01, 2.7577e-02, 6.9725e-03, 4.8388e-02, 1.9970e-03, 9.6922e-04, 7.5577e-04, 8.8469e-05], device='cuda:3', grad_fn=<SoftmaxBackward0>)
2 *************
['2', '3', '4', '1', '5', '8', '7', '29'] tensor([9.1325e-01, 2.7577e-02, 6.9725e-03, 4.8388e-02, 1.9970e-03, 9.6922e-04, 7.5577e-04, 8.8469e-05], device='cuda:3', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0.0276, device='cuda:3', grad_fn=<DivBackward0>), False: tensor(0.9724, device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(-1.1921e-07, device='cuda:3', grad_fn=<DivBackward0>)}
ANSWER0=VQA(image=RIGHT,question='How many dogs are in the image?')
ANSWER1=EVAL(expr='{ANSWER0} <= 2')
FINAL_ANSWER=RESULT(var=ANSWER1)
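These short programs are presumably executed by a small interpreter that keeps a variable environment and dispatches each operation in order. A rough sketch under that assumption (the handler signature `vqa_model.answer` and the control flow are hypothetical):

```python
def run_program(steps, image, vqa_model):
    """Execute parsed steps in order, binding each result to its variable name."""
    env = {}
    for step in steps:
        op, kwargs = step["op"], step["kwargs"]
        if op == "VQA":
            # Ask the VQA model about the image; kwargs["image"] names which image.
            result = vqa_model.answer(image, kwargs["question"])
        elif op == "EVAL":
            # Substitute earlier answers into the expression and evaluate it.
            try:
                result = eval(kwargs["expr"].format(**env))
            except Exception:
                result = "Execute Error"
        elif op == "RESULT":
            # Look up the named variable as the final answer.
            result = env[kwargs["var"]]
        env[step["var"]] = result
    return env.get("FINAL_ANSWER")
```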
torch.Size([3, 3, 448, 448])
question: ['How many dogs are in the image?'], responses:['7']
[('7', 0.12828776251745355), ('8', 0.1258361832781132), ('11', 0.12481772898325143), ('5', 0.124759881092759), ('9', 0.12447036165452931), ('10', 0.1239759375399529), ('6', 0.12393017600998846), ('12', 0.12392196892395223)]
[['7', '8', '11', '5', '9', '10', '6', '12']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
tensor([0.2003, 0.1466, 0.0642, 0.1431, 0.1294, 0.0948, 0.1803, 0.0414], device='cuda:3', grad_fn=<SoftmaxBackward0>)
7 *************
['7', '8', '11', '5', '9', '10', '6', '12'] tensor([0.2003, 0.1466, 0.0642, 0.1431, 0.1294, 0.0948, 0.1803, 0.0414], device='cuda:3', grad_fn=<SelectBackward0>)
The final probability distribution is: {True: tensor(0., device='cuda:3', grad_fn=<MulBackward0>), False: tensor(1., device='cuda:3', grad_fn=<DivBackward0>), 'Execute Error': tensor(0., device='cuda:3', grad_fn=<DivBackward0>)}
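Here none of the candidate counts ['7', '8', '11', '5', '9', '10', '6', '12'] satisfies the comparison, consistent with the '{ANSWER0} <= 2' expression above, so effectively all of the probability mass lands on False.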
[2024-10-22 17:21:22,886] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | optimizer_allgather: 1.36 | optimizer_gradients: 0.29 | optimizer_step: 0.32
[2024-10-22 17:21:22,887] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward_microstep: 5706.47 | backward_microstep: 20272.53 | backward_inner_microstep: 5210.77 | backward_allreduce_microstep: 15061.65 | step_microstep: 7.74
[2024-10-22 17:21:22,887] [INFO] [logging.py:96:log_dist] [Rank 0] rank=0 time (ms) | forward: 5706.49 | backward: 20272.52 | backward_inner: 5210.81 | backward_allreduce: 15061.63 | step: 7.75
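The per-step timing breakdown above (forward/backward/step and their microstep variants) is the kind of output DeepSpeed emits when wall-clock breakdown logging is enabled in its config. For reference, a minimal fragment that turns it on, written as a Python dict; the batch-size value is only a placeholder, not taken from this run:

```python
# Assumed fragment of the DeepSpeed config for this run; only the
# wall_clock_breakdown flag is relevant to the timing lines above.
ds_config = {
    "train_micro_batch_size_per_gpu": 1,   # placeholder value
    "wall_clock_breakdown": True,          # print forward/backward/step timings
}
```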
0%| | 7/2424 [02:55<16:26:54, 24.50s/it]
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
Registering VQA_lavis step
Registering EVAL step
Registering RESULT step
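The repeated "Registering VQA_lavis step", "Registering EVAL step", "Registering RESULT step" lines suggest a registry that maps operation names to step implementations, populated once per executor instance. A minimal sketch of that pattern; the decorator, class names, and execute signatures are hypothetical:

```python
STEP_REGISTRY = {}

def register_step(name):
    """Decorator that registers a step implementation under an operation name."""
    def wrap(cls):
        print(f"Registering {name} step")
        STEP_REGISTRY[name] = cls
        return cls
    return wrap

@register_step("VQA_lavis")
class VQAStep:
    def execute(self, env, image, question, **kw):
        ...

@register_step("EVAL")
class EvalStep:
    def execute(self, env, expr, **kw):
        ...

@register_step("RESULT")
class ResultStep:
    def execute(self, env, var, **kw):
        return env[var]
```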
ANSWER0=VQA(image=RIGHT,question='Does the image show the back end of a bus?')
ANSWER1=EVAL(expr='{ANSWER0}')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=RIGHT,question='Is part of a round metal tray visible between at least two slices of pizza?')
ANSWER1=EVAL(expr='{ANSWER0}')
FINAL_ANSWER=RESULT(var=ANSWER1)
ANSWER0=VQA(image=RIGHT,question='Is the toe of the shoe pointed to the left?')
FINAL_ANSWER=RESULT(var=ANSWER0)
ANSWER0=VQA(image=RIGHT,question='Is the drain in the bottom of the basin visible?')
ANSWER1=EVAL(expr='{ANSWER0}')
FINAL_ANSWER=RESULT(var=ANSWER1)
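For these yes/no programs the EVAL expression is just '{ANSWER0}', so the executor presumably maps 'yes'/'no' answers to Python booleans before substitution; candidates that map to neither (e.g. 'congratulations', 'honey') would fail to evaluate and fall into 'Execute Error', which matches the small 0.0177 'Execute Error' mass seen in the first sample above.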
torch.Size([3, 3, 448, 448])
torch.Size([3, 3, 448, 448])
torch.Size([1, 3, 448, 448])
torch.Size([5, 3, 448, 448])
question: ['Is the drain in the bottom of the basin visible?'], responses:['yes']
[('yes', 0.1298617250866936), ('congratulations', 0.12464161604141298), ('no', 0.12445222599225532), ('honey', 0.12437056445881921), ('solid', 0.12422595371654564), ('right', 0.12419889376311324), ('candle', 0.12414264780165109), ('chocolate', 0.12410637313950891)]
[['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate']]
torch.Size([1, 3, 448, 448]) knan debug pixel values shape
question: ['Is part of a round metal tray visible between at least two slices of pizza?'], responses:['yes']
question: ['Does the image show the back end of a bus?'], responses:['yes']
[('yes', 0.1298617250866936), ('congratulations', 0.12464161604141298), ('no', 0.12445222599225532), ('honey', 0.12437056445881921), ('solid', 0.12422595371654564), ('right', 0.12419889376311324), ('candle', 0.12414264780165109), ('chocolate', 0.12410637313950891)]
[['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate']]
[('yes', 0.1298617250866936), ('congratulations', 0.12464161604141298), ('no', 0.12445222599225532), ('honey', 0.12437056445881921), ('solid', 0.12422595371654564), ('right', 0.12419889376311324), ('candle', 0.12414264780165109), ('chocolate', 0.12410637313950891)]
[['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate']]
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
torch.Size([3, 3, 448, 448]) knan debug pixel values shape
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 844
question: ['Is the toe of the shoe pointed to the left?'], responses:['yes']
dynamic ViT batch size: 3, images per sample: 3.0, dynamic token length: 847
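The "dynamic ViT batch size" lines appear to report how many 448x448 tiles the vision encoder receives for the current batch, with "images per sample" being tiles divided by samples and the token length coming from the tokenized text. A small sketch of that bookkeeping under these assumptions (not the actual preprocessing code):

```python
import torch

# Hypothetical per-sample pixel_values: each sample contributes a variable
# number of 448x448 tiles stacked along dim 0 as [num_tiles, 3, 448, 448].
pixel_values_per_sample = [torch.zeros(3, 3, 448, 448)]   # one sample, three tiles

pixel_values = torch.cat(pixel_values_per_sample, dim=0)
vit_batch_size = pixel_values.shape[0]                     # total tiles in the batch
images_per_sample = vit_batch_size / len(pixel_values_per_sample)
# The dynamic token length would come from the tokenized text (not shown here).
print(f"dynamic ViT batch size: {vit_batch_size}, "
      f"images per sample: {images_per_sample}, ...")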
[('yes', 0.1298617250866936), ('congratulations', 0.12464161604141298), ('no', 0.12445222599225532), ('honey', 0.12437056445881921), ('solid', 0.12422595371654564), ('right', 0.12419889376311324), ('candle', 0.12414264780165109), ('chocolate', 0.12410637313950891)]
[['yes', 'congratulations', 'no', 'honey', 'solid', 'right', 'candle', 'chocolate']]
tensor([5.3275e-01, 1.1398e-02, 4.5345e-01, 9.0136e-04, 1.6238e-04, 6.0110e-04, 9.1763e-05, 6.5200e-04], device='cuda:1', grad_fn=<SoftmaxBackward0>)