Add files using upload-large-folder tool
Browse filesThis view is limited to 50 files because it contains too many changes. See raw diff
- .gitattributes +256 -0
- 2002.09089/main_diagram/main_diagram.drawio +1 -0
- 2002.09089/main_diagram/main_diagram.pdf +0 -0
- 2002.09089/paper_text/intro_method.md +80 -0
- 2004.02958/paper.pdf +3 -0
- 2004.10353/paper.pdf +3 -0
- 2004.14243/paper.pdf +3 -0
- 2007.07670/paper.pdf +3 -0
- 2007.13143/paper.pdf +3 -0
- 2010.02696/paper.pdf +3 -0
- 2010.08258/paper.pdf +3 -0
- 2010.09345/paper.pdf +3 -0
- 2010.10258/paper.pdf +3 -0
- 2010.13924/paper.pdf +3 -0
- 2011.03017/paper.pdf +3 -0
- 2103.16886/paper.pdf +3 -0
- 2104.00380/paper.pdf +3 -0
- 2104.07058/paper.pdf +3 -0
- 2105.08997/paper.pdf +3 -0
- 2105.09574/main_diagram/main_diagram.drawio +1 -0
- 2105.09574/main_diagram/main_diagram.pdf +0 -0
- 2105.09574/paper_text/intro_method.md +34 -0
- 2105.09803/paper.pdf +3 -0
- 2106.06959/paper.pdf +3 -0
- 2107.01396/paper.pdf +3 -0
- 2108.07520/main_diagram/main_diagram.drawio +0 -0
- 2108.07520/paper_text/intro_method.md +42 -0
- 2109.04518/paper.pdf +3 -0
- 2109.04712/paper.pdf +3 -0
- 2109.05361/paper.pdf +3 -0
- 2109.11593/paper.pdf +3 -0
- 2110.05055/paper.pdf +3 -0
- 2110.05419/paper.pdf +3 -0
- 2110.06176/main_diagram/main_diagram.drawio +1 -0
- 2110.06176/main_diagram/main_diagram.pdf +0 -0
- 2110.06176/paper_text/intro_method.md +89 -0
- 2110.07679/main_diagram/main_diagram.drawio +1 -0
- 2110.07679/main_diagram/main_diagram.pdf +0 -0
- 2110.07679/paper_text/intro_method.md +69 -0
- 2110.15072/main_diagram/main_diagram.drawio +1 -0
- 2110.15072/main_diagram/main_diagram.pdf +0 -0
- 2110.15072/paper_text/intro_method.md +137 -0
- 2111.00295/paper.pdf +3 -0
- 2111.00969/paper.pdf +3 -0
- 2111.09639/paper.pdf +3 -0
- 2111.15097/paper.pdf +3 -0
- 2112.00712/paper.pdf +3 -0
- 2112.03258/paper.pdf +3 -0
- 2112.05883/paper.pdf +3 -0
- 2112.06598/paper.pdf +3 -0
.gitattributes
CHANGED
|
@@ -570,3 +570,259 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
|
|
| 570 |
2503.09958/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 571 |
2206.09016/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 572 |
2503.02463/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 570 |
2503.09958/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 571 |
2206.09016/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 572 |
2503.02463/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 573 |
+
2404.11732/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 574 |
+
2310.05105/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 575 |
+
2305.15805/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 576 |
+
2507.06363/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 577 |
+
2305.19370/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 578 |
+
2310.12426/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 579 |
+
2507.13773/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 580 |
+
2406.10502/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 581 |
+
2206.11573/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 582 |
+
2311.02221/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 583 |
+
2204.11018/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 584 |
+
2403.17921/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 585 |
+
2401.12987/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 586 |
+
2010.10258/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 587 |
+
2306.10567/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 588 |
+
2205.15894/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 589 |
+
2312.02203/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 590 |
+
2204.08192/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 591 |
+
2501.03689/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 592 |
+
2210.08772/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 593 |
+
2312.06995/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 594 |
+
2302.12464/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 595 |
+
2306.00765/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 596 |
+
2208.00449/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 597 |
+
2511.15055/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 598 |
+
2405.10084/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 599 |
+
2311.03233/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 600 |
+
2409.12865/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 601 |
+
2409.00255/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 602 |
+
2507.03976/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 603 |
+
2310.05484/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 604 |
+
2302.05910/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 605 |
+
2210.07809/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 606 |
+
2401.06416/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 607 |
+
2307.15992/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 608 |
+
2402.04284/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 609 |
+
2206.14574/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 610 |
+
2010.13924/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 611 |
+
2106.06959/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 612 |
+
2310.12690/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 613 |
+
2302.04308/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 614 |
+
2111.00969/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 615 |
+
2501.12263/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 616 |
+
2305.13009/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 617 |
+
2406.03452/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 618 |
+
2403.09419/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 619 |
+
2411.03877/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 620 |
+
2505.15867/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 621 |
+
2303.12959/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 622 |
+
2311.05419/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 623 |
+
2306.08943/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 624 |
+
2104.00380/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 625 |
+
2302.13543/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 626 |
+
2212.09305/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 627 |
+
2210.15748/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 628 |
+
2401.04348/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 629 |
+
2401.04536/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 630 |
+
2303.13022/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 631 |
+
2210.09337/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 632 |
+
2506.03476/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 633 |
+
2007.07670/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 634 |
+
2403.05973/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 635 |
+
2307.00866/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 636 |
+
2011.03017/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 637 |
+
2401.17752/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 638 |
+
2401.13311/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 639 |
+
2303.10774/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 640 |
+
2212.10773/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 641 |
+
2311.01230/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 642 |
+
2109.04712/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 643 |
+
2301.12313/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 644 |
+
2104.07058/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 645 |
+
2410.08111/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 646 |
+
2407.02075/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 647 |
+
2409.05873/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 648 |
+
2404.05311/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 649 |
+
2510.17036/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 650 |
+
2202.03101/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 651 |
+
2410.23230/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 652 |
+
2507.22603/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 653 |
+
2505.17114/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 654 |
+
2010.02696/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 655 |
+
2506.04453/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 656 |
+
2302.13262/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 657 |
+
2109.11593/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 658 |
+
2103.16886/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 659 |
+
2507.06615/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 660 |
+
2010.09345/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 661 |
+
2405.15269/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 662 |
+
2407.13842/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 663 |
+
2306.07266/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 664 |
+
2504.18059/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 665 |
+
2210.06170/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 666 |
+
2311.10263/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 667 |
+
2404.03010/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 668 |
+
2210.08410/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 669 |
+
2312.03203/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 670 |
+
2309.12814/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 671 |
+
2112.05883/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 672 |
+
2407.05266/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 673 |
+
2207.10553/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 674 |
+
2306.10759/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 675 |
+
2301.13764/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 676 |
+
2407.14679/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 677 |
+
2506.09498/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 678 |
+
2406.07277/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 679 |
+
2206.06234/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 680 |
+
2112.03258/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 681 |
+
2004.10353/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 682 |
+
2307.14897/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 683 |
+
2209.08739/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 684 |
+
2402.18700/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 685 |
+
2406.12292/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 686 |
+
2312.04564/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 687 |
+
2409.08255/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 688 |
+
2310.07756/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 689 |
+
2110.05419/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 690 |
+
2212.06049/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 691 |
+
2311.15173/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 692 |
+
2410.01337/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 693 |
+
2210.13011/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 694 |
+
2504.11453/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 695 |
+
2212.14306/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 696 |
+
2406.13327/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 697 |
+
2301.13393/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 698 |
+
2207.07388/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 699 |
+
2304.11705/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 700 |
+
2111.15097/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 701 |
+
2501.08837/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 702 |
+
2410.15136/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 703 |
+
2202.06200/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 704 |
+
2402.02352/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 705 |
+
2203.06835/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 706 |
+
2310.14676/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 707 |
+
2306.14696/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 708 |
+
2501.15654/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 709 |
+
2112.06598/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 710 |
+
2203.03014/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 711 |
+
2302.07867/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 712 |
+
2404.09216/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 713 |
+
2406.10393/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 714 |
+
2207.09679/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 715 |
+
2111.09639/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 716 |
+
2508.09468/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 717 |
+
2004.02958/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 718 |
+
2508.16122/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 719 |
+
2201.11945/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 720 |
+
2402.02026/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 721 |
+
2506.01776/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 722 |
+
2510.14741/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 723 |
+
2304.14364/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 724 |
+
2408.00415/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 725 |
+
2306.04785/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 726 |
+
2004.14243/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 727 |
+
2407.13545/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 728 |
+
2307.01189/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 729 |
+
2310.05175/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 730 |
+
2401.16475/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 731 |
+
2407.02466/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 732 |
+
2501.09527/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 733 |
+
2206.08496/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 734 |
+
2210.10769/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 735 |
+
2506.00135/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 736 |
+
2310.03094/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 737 |
+
2110.05055/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 738 |
+
2305.13812/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 739 |
+
2511.09058/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 740 |
+
2401.00167/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 741 |
+
2304.13207/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 742 |
+
2407.13089/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 743 |
+
2304.00451/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 744 |
+
2408.09849/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 745 |
+
2305.15987/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 746 |
+
2311.03534/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 747 |
+
2210.13664/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 748 |
+
2312.04668/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 749 |
+
2201.01155/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 750 |
+
2303.01074/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 751 |
+
2409.01448/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 752 |
+
2404.16163/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 753 |
+
2406.07524/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 754 |
+
2310.15003/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 755 |
+
2206.03171/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 756 |
+
2309.14339/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 757 |
+
2203.00725/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 758 |
+
2502.18993/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 759 |
+
2210.08607/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 760 |
+
2107.01396/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 761 |
+
2402.11089/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 762 |
+
2212.14593/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 763 |
+
2505.06186/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 764 |
+
2406.09187/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 765 |
+
2304.06813/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 766 |
+
2111.00295/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 767 |
+
2412.01543/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 768 |
+
2503.10149/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 769 |
+
2510.11824/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 770 |
+
2409.06123/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 771 |
+
2407.03886/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 772 |
+
2203.06107/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 773 |
+
2405.18080/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 774 |
+
2407.21335/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 775 |
+
2406.01194/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 776 |
+
2405.01502/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 777 |
+
2307.09437/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 778 |
+
2411.09502/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 779 |
+
2312.01261/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 780 |
+
2205.00979/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 781 |
+
2201.12126/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 782 |
+
2409.13980/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 783 |
+
2403.10044/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 784 |
+
2109.05361/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 785 |
+
2410.16451/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 786 |
+
2310.10013/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 787 |
+
2203.15065/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 788 |
+
2307.03476/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 789 |
+
2510.01240/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 790 |
+
2105.09803/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 791 |
+
2010.08258/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 792 |
+
2403.18730/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 793 |
+
2501.09221/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 794 |
+
2210.03649/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 795 |
+
2204.06355/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 796 |
+
2211.10243/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 797 |
+
2204.09890/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 798 |
+
2401.04984/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 799 |
+
2503.08723/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 800 |
+
2203.12892/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 801 |
+
2109.04518/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 802 |
+
2112.00712/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 803 |
+
2306.03522/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 804 |
+
2310.11466/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 805 |
+
2302.00845/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 806 |
+
2007.13143/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 807 |
+
2406.06264/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 808 |
+
2403.16605/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 809 |
+
2309.06415/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 810 |
+
2501.15695/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 811 |
+
2405.18654/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 812 |
+
2403.20320/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 813 |
+
2308.12112/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 814 |
+
2402.00258/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 815 |
+
2203.13097/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 816 |
+
2308.00273/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 817 |
+
2306.05628/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 818 |
+
2505.22928/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 819 |
+
2105.08997/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 820 |
+
2404.02478/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 821 |
+
2412.02646/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 822 |
+
2212.10154/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 823 |
+
2207.00614/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 824 |
+
2404.04929/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 825 |
+
2407.01567/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 826 |
+
2405.02842/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 827 |
+
2304.04970/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
| 828 |
+
2412.01007/paper.pdf filter=lfs diff=lfs merge=lfs -text
|
2002.09089/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="www.draw.io" modified="2020-02-13T05:07:41.310Z" agent="Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/79.0.3945.130 Safari/537.36" etag="DiGsH_kTYT18h6bqaT3P" version="12.6.8" type="device" pages="2"><diagram id="C5RBs43oDa-KdzZeNtuy" name="Page-1">7V1de6K6Fv41Ps+ei/oAIXxcVlun0z2fZ2ZOZ587ClHpIDCIre5ff8KXQhIUKQG02otKxAhrve/KWisrYQDGi/X7wPDnnzwLOQNJsNYDcDOQJFGWdfwvatkkLYqkJA2zwLbSk3YN3+1/UdoopK0r20LLwomh5zmh7RcbTc91kRkW2owg8F6Kp009p/irvjFDVMN303Do1gfbCudJqyapu/Y7ZM/m2S+LSnrDCyM7Ob2T5dywvJdcE7gdgHHgeWHybrEeIycSXiaXhw+bB+fjb+X9/bflH+Pn6O8fn/97lXQ2OeYr21sIkBvW7tq/G03lL3cmfAkfVFd5Wn+cP6dfEZ4NZ5XKK73XcJMJMPBWroWiTsQBGL3M7RB99w0z+vQFQwa3zcOFk37sGI/IGRnm71n8tbHneAH+yPVcfP5oGQbeb5Q1DiQwnkR/+JOp7Ti59kn8ito9N0wxhQGVHBfOu9EVBbcbjj1zcZuDplhCo1lgWDYqnDqNX/izisJMhf6MghCtc1BKhfseeQsUBht8SvppBpOUJ7KaHr/sUKcqads8hzgJpo1GivTZtuudNvGbVKFs5X74tfyJVpLpfd787+7fxZ97/9unK6AxtKs4kYD8go6VP6sIxCPHdtFVdm3X+BQdcwHuPsfvZun/uJfHoH8tjNuKQHO1jFEU3RTw1/tvqYEuWr3261XoIdfEpjvIesN48Xe/8BpGV+NsSnBe3FLEIrmgSJNLhAxyiTovcsnnRq6a4PvgYjUuUewhuMbCNpdnikFQBYMyC4MSLwzCCwbjLn6ghe8FRuy42tMpwqrHSDpPGMpK72CYRQQ5GE6QEa6CyCrEA5PtzvrjIfBv4QOytgZXCdCI0lmOK+AFKFGlADX23GfPWYW258Y8d4wNHna6VvRbgIdGoEOh0QEFlr3ReMGDDlo/uP4qpASLA3Y/emuuHtFhyT4mavj4uG3YhrFfVmE0cqbtlhH8/oK7scNIJMJQgMVGKW6NlZVEripH7YhCUT1AoNWjtToaAIZP0lPFiCB1MCbYcXWiL90h5xmFtmlwVBmZKQCMEVxlqUzg5kceVhlyresoGxdpzTGWS9ssKuk4YSGrkLSjRVUwLrQosrYAOUZoPxdTfSzxpL/w1bPx5e3IQ9g2oBASXnqrwETpt6Rcbo3oiFKpQlxLaAQzFFIdxdra3nZ9BdIOWP841zrPACB5pg5FSFGNlZPT4FDdg6ZXkU1/o2Tb+pCZOqA+hPXoRmlWEciuOBNOpH2QzNOLcF4lktWOyyHiy0x6LvEiO+d3NqbCLrguk4iQwBBqwu4lU7xnQV2VhhosB/brghqRAZm3QHzQHPEpNbdPfOkEhtqMilIXVISkLwTwpYoU/VgpKgwMXedFvwpxyVnSTyad3Pr0o1TbPv0qeLo5Laa52FNXIVQailMUMltAdsRZfRLLbTp79ZFSl8lAvrb6yCQOb/W17sIkN7TvkuReq5rSUGVVUw5ty6qu4Oe0rWrQa1XXZzU40BFvVVfwjHZzFEKNOQqgRH88p7Bkgi2M4iuRAQVy+GtuSlQ+O5lu8zddyRRUkOle56EocCMw08pCKT50zbkXxCS7sQNkRjON6ZlL1/B/eAn/TtkNgQ0ZLB0W+yFrrErsFVaMscmd5kcnLMuvVxUh83rLLos6Xy+cj98kV9Co8QTHzZ0cAuVpAkstAgvWHgl
hPafnaGQB9gWXIqvEGeOLrONC3fNJWKhCQ3CiOpLbdawAXcbyNjRIGgRqhKhsEEjL0rYGK0zWHeXGEescbuCtdiOz3DtFvR7pkzjBu5xvi4OOQ8MRvh4skfPBkoRyaLyuuvq4RNEZUUcrOjGQXB1SmTpkR2q71JEr5IoaioDaXIhABEWQEWiyaiX5EeW4oOiMiUKWG9YlitJy+kWuEEGcIFEIl1lh1CWKrPQBP6a8VX+aAjgp4tpMIQvAeTOF9qex9RPCwHCXvrfE/IiXET+fI30YNflM+nCryZcruMKH6IOFFGx+RWoYwuzwn1Qr8cHNunC0GdTJihycSIB6r/gJ4YF8WuXp+WxNYjYkgnYn52EFp/30qAiI+gnIoCJz6Sm3am2pYTETQentZCJPxsyg1NTQ47StoJSs8uxe8A342nVNYEHihMIsA2lTk6Uw9fZaueVhQaVeWVBAWFBY18ORBcIUt1wfAxtYiUGBICmAPVXvFZJWgEwBVh4dyY5aTohAVvyR1HbbWVW3FebKve0KKwYbG0Tzm67wMuhQLElJ5ZAEGEjit36+6fRuLxwWCuhdj5tKBb+QniMtjHi1Y/DEOuy7uH6ZO7Jat3ZaqxQFbS1Jazr/21MvlVJY52yrEB68DS+1Z9SGQkNeKpUwaNlLVRpYSkGBYLuV23Eg6IluqSRBXS+V6qhlL1XhXri4VTTtfuq3o9aSC4o6POyNSrBNw93ABEXOcANNzZvuK2EoygfMd3z0FQU2viEUDE64JI0K5OvmVGVBHmaK6cqTKt/lq+paY1Hw17EqqfXG48lAU/4Kr6zwXdWVxphh4T6Op/47I+LJduk0MZQifI0ivtqm4VynHyxsy4p+hmlUimaH3Bm0ZOfR/M6fxOahqqCDMc+9WSjXkFFVw+IAt3hXYcW7BSSZW/HskALE+EWD59kI/soA9O5d/aXr/QQUL1SQtVaSXm1tOjdUqCx3rg4qKtqhUiN04haIF2AgEPtlRlRWXWt9M0Kp/2JBDhWhFQEhC2CoKd1iIhvqXuuklHso4ombhx46KEDS+2VZNFbS8DIU9cbylKY5OgMMK9n5BndAnnjBixFYA3oXbuZg6p/Rhshi/zZE1ppOopWlysqTa60k0SRZHxK+CGv6Q2IJn5vsG8iinVkJBQ4xKTXVzU+z+mo5Ra1VWFZ3ohUSol4u3QNbVvIjFCv9eCShLmnplD0ipd/amemkr9wWh8R0RstZav1QAJg4HMbJ+9b7H3FV9oAqbkMwkEtte2dOeXsbyrTpZ2qVnJ1WbbPOCn+OtM31ajZOe7o+2mmS1KVA7DxZ2Q4z+mrZ9m53hu88bzKQRqeep+NmqQVlKJesWe3KUovZxqyNp22NQsb2XDL6/Rr7AcRjv6DvXmpn6GI+XLReMjeNtY8zSlJJfcP1am07thFLzfGWS7Q8LQAeBtx3Yq/nVp1PJdqwOfeSCwBUGI8KYiaDmkCg+/sBrtfTJ0tCc//ps//zfnHPfLztG8wO/7j6z+2v3iWEC45ndPDVCDFh3LgFc5ojcMldPFgpY+bmBNyQWqFa/pJkqebc63LRKBVHxdr5Fh0OhfyLmDrl5/Iz8VIh8Du9EFspSWG1EF4zhcxjKkFXVHAdyRWt7TC3xAIf/ZMRGb/fsTQ6qLcRQn7t0z4Q9YS35IY+dTe5389/jrMVTBlzn6zYAorI3SDRgkglx1le1NWkDsvpmYI/WOtaM7IN3nRQu0UbtzFgb0jRZkzL9tMacNTObHqZ2A669tTyoX6aM9Rr/+nrwzfx/u+nx0d97mw+TR+f9sSKr3qim6QMxmCgq9n/6Ak2p2QtSvhbFaV7akckaipUkFl1rKzanSb4zUTBwVr4eihQE+WrFwwUKtul4soqoHUPANYOLA0AQJMT1Wf/5QsCkoQKsU6P8YDWVtVfvknLq9SP3YaE+el/IF30HzOe3PFf14Yy6BYCB1e31IOAnmhevwCgkImVZQIAYrfaf/1iW6b24RpeFB4
rjnwaLKNaqVWFM5/o2oTLt1YvGo97IR9A2rnGOTn5F45no3rvOM5akNGAxsEaXDQea7M9juPDwIu0sEv+4Dudf/IsFJ3xfw==</diagram><diagram id="TnKWbZ1Ej7AmrP5yXyUu" name="Page-2">ldFLE4IgEADgX8OxGZTscbbXocfBQ3kkWZUJXYdosn59NmjGeKkTy8eyy4OwsKjXmlf5DgUo4lNRE7Ygvs+CMW2GtzysTCbzVjIthTWvh0g+ocUu7SYFXJ1Eg6iMrFxMsCwhMY5xrfHupqWo3K4Vz2AAUcLVUI9SmNzqLKC9b0BmedfZo+1KwbvkFq45F3j/IrYkLNSIxkZFHYJ6v173LvE2jg5jek6PVbSXcLrcsunIFlv9s+VzBQ2l+bV0E/RHaybOD7PlCw==</diagram></mxfile>
|
2002.09089/main_diagram/main_diagram.pdf
ADDED
|
Binary file (45.2 kB). View file
|
|
|
2002.09089/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,80 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
It is important that robots and other autonomous agents can safely learn from and adapt to a variety of human preferences and goals. One common way to learn preferences and goals is via imitation learning, in which an autonomous agent learns how to perform a task by observing demonstrations of the task [@Argall2009]. When learning from demonstrations, it is important for an agent to be able to provide high-confidence bounds on its performance with respect to the demonstrator; however, while there exists much work on high-confidence off-policy evaluation in the reinforcement learning (RL) setting, there has been much less work on high-confidence policy evaluation in the imitation learning setting, where the reward samples are unavailable.
|
| 4 |
+
|
| 5 |
+
Prior work on high-confidence policy evaluation for imitation learning has used Bayesian inverse reinforcement learning (IRL) [@ramachandran2007bayesian] to allow an agent to reason about reward uncertainty and policy generalization error [@brown2018risk]. However, Bayesian IRL is typically intractable for complex problems due to the need to repeatedly solve an MDP in the inner loop, resulting in high computational cost as well as high sample cost if a model is not available. This precludes robust safety and uncertainty analysis for imitation learning in high-dimensional problems or in problems in which a model of the MDP is unavailable. We seek to remedy this problem by proposing and evaluating a method for safe and efficient Bayesian reward learning via preferences over demonstrations. Preferences over trajectories are intuitive for humans to provide [@akrour2011preference; @wilson2012bayesian; @sadigh2017active; @christiano2017deep; @palan2019learning] and enable better-than-demonstrator performance [@browngoo2019trex; @brown2019drex]. To the best of our knowledge, we are the first to show that preferences over demonstrations enable both fast Bayesian reward learning in high-dimensional, visual control tasks as well as efficient high-confidence performance bounds.
|
| 6 |
+
|
| 7 |
+
We first formalize the problem of high-confidence policy evaluation [@thomas2015high] for imitation learning. We then propose a novel algorithm, Bayesian Reward Extrapolation (Bayesian REX), that uses a pairwise ranking likelihood to significantly increase the efficiency of generating samples from the posterior distribution over reward functions. We demonstrate that Bayesian REX can leverage neural network function approximation to learn useful reward features via self-supervised learning in order to efficiently perform deep Bayesian reward inference from visual demonstrations. Finally, we demonstrate that samples obtained from Bayesian REX can be used to solve the high-confidence policy evaluation problem for imitation learning. We evaluate our method on imitation learning for Atari games and demonstrate that we can efficiently compute high-confidence bounds on policy performance, without access to samples of the reward function. We use these high-confidence performance bounds to rank different evaluation policies according to their risk and expected return under the posterior distribution over the unknown ground-truth reward function. Finally, we provide evidence that bounds on uncertainty and risk provide a useful tool for detecting reward hacking/gaming [@amodei2016concrete], a common problem in reward inference from demonstrations [@ibarz2018reward] as well as reinforcement learning [@ng1999policy; @leike2017ai].
|
| 8 |
+
|
| 9 |
+
# Method
|
| 10 |
+
|
| 11 |
+
We model the environment as a Markov Decision Process (MDP) consisting of states $\mathcal{S}$, actions $\mathcal{A}$, transition dynamics $T:\mathcal{S} \times \mathcal{A} \times \mathcal{S} \to [0,1]$, reward function $R:\mathcal{S} \to \mathbb{R}$, initial state distribution $S_0$, and discount factor $\gamma$. Our approach extends naturally to rewards defined as $R(s,a)$ or $R(s,a,s')$; however, state-based rewards have some advantages. @airl prove that a state-only reward function is a necessary and sufficient condition for a reward function that is disentangled from dynamics. Learning a state-based reward also allows the learned reward to be used as a potential function for reward shaping [@ng1999policy], if a sparse ground-truth reward function is available.
|
| 12 |
+
|
| 13 |
+
A policy $\pi$ is a mapping from states to a probability distribution over actions. We denote the value of a policy $\pi$ under reward function $R$ as $V^\pi_R = \mathbb{E}_\pi[\sum_{t=0}^\infty \gamma^t R(s_t) | s_0 \sim S_0]$ and denote the value of executing policy $\pi$ starting at state $s \in \mathcal{S}$ as $V^\pi_R(s) = \mathbb{E}_\pi[\sum_{t=0}^\infty \gamma^t R(s_t) | s_0 = s]$. Given a reward function $R$, the Q-value of a state-action pair $(s,a)$ is $Q^{\pi}_R(s,a) = \mathbb{E}_\pi[\sum_{t=0}^\infty \gamma^t R(s_t) | s_0 = s, a_0 = a]$. We also denote $V^*_R = \max_{\pi} V^\pi_R$ and $Q^*_R(s,a) = \max_{\pi} Q^{\pi}_R(s,a)$.
|
| 14 |
+
|
| 15 |
+
Bayesian inverse reinforcement learning (IRL) [@ramachandran2007bayesian] models the environment as an MDP$\setminus$R in which the reward function is unavailable. Bayesian IRL seeks to infer the latent reward function of a Boltzmann-rational demonstrator that executes the following policy $$\begin{equation}
|
| 16 |
+
\label{eq:boltzman_demonstrator}
|
| 17 |
+
\pi^\beta_R(a|s) = \frac{e^{\beta Q^*_R(s,a)}}{\sum_{b \in \mathcal{A}} e^{\beta Q^*_R(s,b)}},
|
| 18 |
+
\end{equation}$$ in which $R$ is the true reward function of the demonstrator, and $\beta \in [0, \infty)$ represents the confidence that the demonstrator is acting optimally. Under the assumption of Boltzmann rationality, the likelihood of a set of demonstrated state-action pairs, $D = \{ (s,a) : (s,a) \sim \pi_D \}$, given a specific reward function hypothesis $R$, can be written as $$\begin{equation}
|
| 19 |
+
\label{eqn:boltzman_likelihood}
|
| 20 |
+
P(D | R) = \prod_{(s,a) \in D} \pi^\beta_R(a|s) = \prod_{(s,a) \in D} \frac{e^{\beta Q^*_R(s,a)}}{\sum_{b \in \mathcal{A}} e^{\beta Q^*_R(s,b)}}.
|
| 21 |
+
\end{equation}$$
|
| 22 |
+
|
| 23 |
+
Bayesian IRL generates samples from the posterior distribution $P(R|D) \propto P(D|R)P(R)$ via Markov Chain Monte Carlo (MCMC) sampling, but this requires solving for $Q^*_{R'}$ to compute the likelihood of each new proposal $R'$. Thus, Bayesian IRL methods are only used for low-dimensional problems with reward functions that are often linear combinations of a small number of hand-crafted features [@bobu2018learning; @biyik2019asking]. One of our contributions is an efficient Bayesian reward inference algorithm that leverages preferences over demonstrations in order to significantly improve the efficiency of Bayesian reward inference.
|
| 24 |
+
|
| 25 |
+
Before detailing our approach, we first formalize the problem of high-confidence policy evaluation for imitation learning. We assume access to an MDP$\setminus$R, an evaluation policy $\pi_{\rm eval}$, a set of demonstrations, $D = \{\tau_1,\ldots,\tau_m\}$, in which $\tau_i$ is either a complete or partial trajectory comprised of states or state-action pairs, a confidence level $\delta$, and performance statistic $g:\Pi \times \mathcal{R} \rightarrow \mathbb{R}$, in which $\mathcal{R}$ denotes the space of reward functions and $\Pi$ is the space of all policies.
|
| 26 |
+
|
| 27 |
+
The *High-Confidence Policy Evaluation problem for Imitation Learning* (HCPE-IL) is to find a high-confidence lower bound $\hat{g}: \Pi \times \mathcal{D} \rightarrow \mathbb{R}$ such that $$\begin{equation}
|
| 28 |
+
\text{Pr}(g(\pi_{\rm eval}, R^*) \geq \hat{g}(\pi_{\rm eval}, D)) \geq 1 - \delta,
|
| 29 |
+
\end{equation}$$ in which $R^*$ denotes the demonstrator's true reward function and $\mathcal{D}$ denotes the space of all possible demonstration sets. HCPE-IL takes as input an evaluation policy $\pi_{\rm eval}$, a set of demonstrations $D$, and a performance statistic, $g$, which evaluates a policy under a reward function. The goal of HCPE-IL is to return a high-confidence lower bound $\hat{g}$ on the performance statistic $g(\pi_{\rm eval}, R^*)$.
|
| 30 |
+
|
| 31 |
+
We now describe our main contribution: a method for scaling Bayesian reward inference to high-dimensional visual control tasks as a way to efficiently solve the HCPE-IL problem for complex imitation learning tasks. Our first insight is that the main bottleneck for standard Bayesian IRL [@ramachandran2007bayesian] is computing the likelihood function in Equation ([\[eqn:boltzman_likelihood\]](#eqn:boltzman_likelihood){reference-type="ref" reference="eqn:boltzman_likelihood"}) which requires optimal Q-values. Thus, to make Bayesian reward inference scale to high-dimensional visual domains, it is necessary to either efficiently approximate optimal Q-values or to formulate a new likelihood. Value-based reinforcement learning focuses on efficiently learning optimal Q-values; however, for complex visual control tasks, RL algorithms can take several hours or even days to train [@mnih2015human; @hessel2018rainbow]. This makes MCMC, which requires evaluating large numbers of likelihood ratios, infeasible given the current state-of-the-art in value-based RL. Methods such as transfer learning have great potential to reduce the time needed to calculate $Q^*_R$ for a new proposed reward function $R$; however, transfer learning is not guaranteed to speed up reinforcement learning [@taylor2009transfer]. Thus, we choose to focus on reformulating the likelihood function as a way to speed up Bayesian reward inference.
|
| 32 |
+
|
| 33 |
+
An ideal likelihood function requires little computation and minimal interaction with the environment. To accomplish this, we leverage recent work on learning control policies from preferences [@christiano2017deep; @palan2019learning; @biyik2019asking]. Given ranked demonstrations, @browngoo2019trex propose Trajectory-ranked Reward Extrapolation (T-REX): an efficient reward inference algorithm that transforms reward function learning into a classification problem via a pairwise ranking loss. T-REX removes the need to repeatedly sample from or partially solve an MDP in the inner loop, allowing it to scale to visual imitation learning domains such as Atari and to extrapolate beyond the performance of the best demonstration. However, T-REX only solves for a point estimate of the reward function. We now discuss how a similar approach based on a pairwise preference likelihood allows for efficient sampling from the posterior distribution over reward functions.
|
| 34 |
+
|
| 35 |
+
We assume access to a sequence of $m$ trajectories, $D = \{ \tau_1,\ldots,\tau_m \}$, along with a set of pairwise preferences over trajectories $\mathcal{P} = \{(i,j) : \tau_i \prec \tau_j \}$. Note that we do not require a total-ordering over trajectories. These preferences may come from a human demonstrator or could be automatically generated by watching a learner improve at a task [@jacq2019learning; @browngoo2019trex] or via noise injection [@brown2019drex]. Given trajectory preferences, we can formulate a pair-wise ranking likelihood to compute the likelihood of a set of preferences over demonstrations $\mathcal{P}$, given a parameterized reward function hypothesis $R_\theta$. We use the standard Bradley-Terry model [@bradley1952rank] to obtain the following pairwise ranking likelihood function, commonly used in learning to rank applications such as collaborative filtering [@volkovs2014new]: $$\begin{equation}
|
| 36 |
+
\label{eqn:pairwiserank}
|
| 37 |
+
P(D, \mathcal{P} \mid R_\theta) = \prod_{(i,j) \in \mathcal{P}} \frac{e^{\beta R_\theta(\tau_j)}}{e^{\beta R_\theta(\tau_i)} + e^{\beta R_\theta(\tau_j)}},
|
| 38 |
+
\end{equation}$$ in which $R_\theta(\tau) = \sum_{s \in \tau} R_\theta(s)$ is the predicted return of trajectory $\tau$ under the reward function $R_\theta$, and $\beta$ is the inverse temperature parameter that models the confidence in the preference labels. We can then perform Bayesian inference via MCMC to obtain samples from $P(R_\theta \mid D, \mathcal{P}) \propto P(D, \mathcal{P} \mid R_\theta) P(R_\theta)$. We call this approach Bayesian Reward Extrapolation or Bayesian REX.
|
| 39 |
+
|
| 40 |
+
Note that using the likelihood function defined in Equation ([\[eqn:pairwiserank\]](#eqn:pairwiserank){reference-type="ref" reference="eqn:pairwiserank"}) does not require solving an MDP. In fact, it does not require any rollouts or access to the MDP. All that is required is that we first calculate the return of each trajectory under $R_\theta$ and compare the relative predicted returns to the preference labels to determine the likelihood of the demonstrations under the reward hypothesis $R_\theta$. Thus, given preferences over demonstrations, Bayesian REX is significantly more efficient than standard Bayesian IRL. In the following section, we discuss further optimizations that improve the efficiency of Bayesian REX and make it more amenable to our end goal of high-confidence policy evaluation bounds.
|
| 41 |
+
|
| 42 |
+
{#fig:BayesianREX width="\\linewidth"}
|
| 43 |
+
|
| 44 |
+
In order to learn rich, complex reward functions, it is desirable to use a deep network to represent the reward function $R_\theta$. While MCMC remains the gold-standard for Bayesian Neural Networks, it is often challenging to scale to deep networks. To make Bayesian REX more efficient and practical, we propose to limit the proposal to only change the last layer of weights in $R_\theta$ when generating MCMC proposals---we will discuss pre-training the bottom layers of $R_\theta$ in the next section. After pre-training, we freeze all but the last layer of weights and use the activations of the penultimate layer as the latent reward features $\phi(s) \in \mathbb{R}^k$. This allows the reward at a state to be represented as a linear combination of $k$ features: $R_\theta(s) = w^T \phi(s)$. Similar to work by @pradier2018projected, operating in a lower-dimensional latent space makes full Bayesian inference tractable.
|
| 45 |
+
|
| 46 |
+
A second advantage of using a learned linear reward function is that it allows us to efficiently compute likelihood ratios when performing MCMC. Consider the likelihood function in Equation ([\[eqn:pairwiserank\]](#eqn:pairwiserank){reference-type="ref" reference="eqn:pairwiserank"}). If we do not represent $R_\theta$ as a linear combination of pretrained features, and instead let any parameter in $R_\theta$ change during each proposal, then for $m$ demonstrations of length $T$, computing $P(D, \mathcal{P} \mid R_\theta)$ for a new proposal $R_\theta$ requires $O(mT)$ forward passes through the entire network to compute $R_\theta(\tau_i)$. Thus, the complexity of generating $N$ samples from the posterior results is $O(mTN |R_\theta|)$, where $|R_\theta|$ is the number of computations required for a full forward pass through the entire network $R_\theta$. Given that we would like to use a deep network to parameterize $R_\theta$ and generate thousands of samples from the posterior distribution over $R_\theta$, this many computations will significantly slow down MCMC proposal evaluation.
|
| 47 |
+
|
| 48 |
+
If we represent $R_\theta$ as a linear combination of pre-trained features, we can reduce this computational cost because $$\begin{equation}
|
| 49 |
+
R_\theta (\tau) = \sum_{s \in \tau} w^T\phi(s) = w^T\sum_{s \in \tau} \phi(s) = w^T \Phi_{\tau}.
|
| 50 |
+
\end{equation}$$ Thus, we can precompute and cache $\Phi_{\tau_i} = \sum_{s \in \tau_i} \phi(s)$ for $i = 1,\ldots,m$ and rewrite the likelihood as $$\begin{equation}
|
| 51 |
+
\label{eqn:lincombo_Boltzman}
|
| 52 |
+
P(D, \mathcal{P} \mid R_\theta) = \prod_{(i,j) \in \mathcal{P}} \frac{e^{\beta w^T \Phi_{\tau_j}}}{e^{\beta w^T \Phi_{\tau_j}} + e^{\beta w^T \Phi_{\tau_i}}}.
|
| 53 |
+
\end{equation}$$ Note that demonstrations only need to be passed through the reward network once to compute $\Phi_{\tau_i}$ since the pre-trained embedding remains constant during MCMC proposal generation. This results in an initial $O(mT)$ passes through all but the last layer of $R_\theta$ to obtain $\Phi_{\tau_i}$, for $i=1,\ldots,m$, and then only $O(mk)$ multiplications per proposal evaluation thereafter---each proposal requires that we compute $w^T\Phi_{\tau_i}$ for $i=1,\ldots,m$ and $\Phi_{\tau_i} \in \mathbb{R}^k$. Thus, when using feature pre-training, the total complexity is only $O(mT|R_\theta| + mkN)$ to generate $N$ samples via MCMC. This reduction in the complexity of MCMC from $O(mTN |R_\theta|)$ to $O(mT|R_\theta| + mkN)$ results in significant and practical computational savings because (1) we want to make $N$ and $R_\theta$ large and (2) the number of demonstrations, $m$, and the size of the latent embedding, $k$, are typically several orders of magnitude smaller than $N$ and $|R_\theta|$.
|
| 54 |
+
|
| 55 |
+
A third, and critical advantage of using a learned linear reward function is that it makes solving the HCPE-IL problem discussed in Section [4](#sec:hcpe-il){reference-type="ref" reference="sec:hcpe-il"} tractable. Performing a single policy evaluation is a non-trivial task [@sutton2000policy] and even in tabular settings has complexity $O(|S|^3)$ in which $|S|$ is the size of the state-space [@littman1995complexity]. Because we are in an imitation learning setting, we would like to be able to efficiently evaluate any given policy across the posterior distribution over reward functions found via Bayesian REX. Given a posterior distribution over $N$ reward function hypotheses we would need to solve $N$ policy evaluations. However, note that given $R(s) = w^T \phi(s)$, the value function of a policy can be written as $$\begin{equation}
|
| 56 |
+
V^\pi_R = \mathbb{E}_{\pi}[\sum_{t=0}^T R(s_t)] = w^T \mathbb{E}_{\pi}[\sum_{t=0}^T \phi(s_t)] = w^T \Phi_\pi,
|
| 57 |
+
\end{equation}$$ in which we assume a finite horizon MDP with horizon $T$ and in which $\Phi_\pi$ are the expected feature counts [@abbeel2004apprenticeship; @barreto2017successor] of $\pi$. Thus, given any evaluation policy $\pi_{\rm eval}$, we only need to solve one policy evaluation problem to compute $\Phi_{\rm eval}$. We can then compute the expected value of $\pi_{\rm eval}$ over the entire posterior distribution of reward functions via a single matrix vector multiplication $W \Phi_{\pi_{\rm eval}}$, where $W$ is an $N$-by-$k$ matrix with each row corresponding to a single reward function weight hypothesis $w^T$. This significantly reduces the complexity of policy evaluation over the reward function posterior distribution from $O(N |S|^3)$ to $O(|S|^3 + Nk)$.
|
| 58 |
+
|
| 59 |
+
When we refer to Bayesian REX we will refer to the optimized version described in this section (see the Appendix for full implementation details and pseudo-code)[^1] . Running MCMC with 66 preference labels to generate 100,000 reward hypotheses for Atari imitation learning tasks takes approximately 5 minutes on a Dell Inspiron 5577 personal laptop with an Intel i7-7700 processor without using the GPU. In comparison, using standard Bayesian IRL to generate *one sample* from the posterior takes 10+ hours of training for a parallelized PPO reinforcement learning agent [@baselines] on an NVIDIA TITAN V GPU.
|
| 60 |
+
|
| 61 |
+
The previous section presupposed access to a pretrained latent embedding function $\phi: S \rightarrow \mathbb{R}^k$. We now discuss our pre-training process. Because we are interested in imitation learning problems, we need to be able to train $\phi(s)$ from the demonstrations without access to the ground-truth reward function. One potential method is to train $R_\theta$ using the pairwise ranking likelihood function in Equation ([\[eqn:pairwiserank\]](#eqn:pairwiserank){reference-type="ref" reference="eqn:pairwiserank"}) and then freeze all but the last layer of weights; however, the learned embedding may overfit to the limited number of preferences over demonstrations and fail to capture features relevant to the ground-truth reward function. Thus, we supplement the pairwise ranking objective with auxiliary objectives that can be optimized in a self-supervised fashion using data from the demonstrations.
|
| 62 |
+
|
| 63 |
+
::: {#tab:aux_losses}
|
| 64 |
+
------------------------- --------------------------------------------------------
|
| 65 |
+
Inverse Dynamics $f_{\rm ID}(\phi(s_t), \phi(s_{t+1})) \rightarrow a_t$
|
| 66 |
+
Forward Dynamics $f_{\rm FD}(\phi(s_t), a_t) \rightarrow s_{t+1}$
|
| 67 |
+
Temporal Distance         $f_{\rm TD}(\phi(s_t), \phi(s_{t+x})) \rightarrow x$
|
| 68 |
+
Variational Autoencoder $f_{A}(\phi(s_t)) \rightarrow s_t$
|
| 69 |
+
------------------------- --------------------------------------------------------
|
| 70 |
+
|
| 71 |
+
: Self-supervised learning objectives used to pre-train $\phi(s)$.
|
| 72 |
+
:::
|
| 73 |
+
|
| 74 |
+
We use the following self-supervised tasks to pre-train $R_\theta$: (1) Learn an inverse dynamics model that uses embeddings $\phi(s_t)$ and $\phi(s_{t+1})$ to predict the corresponding action $a_t$ [@torabi2018behavioral; @hanna2017grounded], (2) Learn a forward dynamics model that predicts $s_{t+1}$ from $\phi(s_t)$ and $a_t$ [@oh2015action; @thananjeyan2019safety], (3) Learn an embedding $\phi(s)$ that predicts the temporal distance between two randomly chosen states from the same demonstration [@imitationyoutube], and (4) Train a variational pixel-to-pixel autoencoder in which $\phi(s)$ is the learned latent encoding [@makhzani2017pixelgan; @doersch2016tutorial]. Table [1](#tab:aux_losses){reference-type="ref" reference="tab:aux_losses"} summarizes the self-supervised tasks used to train $\phi(s)$.
|
| 75 |
+
|
| 76 |
+
There are many possibilities for pre-training $\phi(s)$. We used the objectives described above to encourage the embedding to encode different features. For example, an accurate inverse dynamics model can be learned by only attending to the movement of the agent. Learning forward dynamics supplements this by requiring $\phi(s)$ to encode information about short-term changes to the environment. Learning to predict the temporal distance between states in a trajectory forces $\phi(s)$ to encode long-term progress. Finally, the autoencoder loss acts as a regularizer to the other losses as it seeks to embed all aspects of the state (see the Appendix for details). The Bayesian REX pipeline for sampling from the reward function posterior is shown in Figure [1](#fig:BayesianREX){reference-type="ref" reference="fig:BayesianREX"}.
|
| 77 |
+
|
| 78 |
+
We now discuss how to use Bayesian REX to find an efficient solution to the high-confidence policy evaluation for imitation learning (HCPE-IL) problem (see Section [4](#sec:hcpe-il){reference-type="ref" reference="sec:hcpe-il"}). Given samples from the distribution $P(w \mid D, \mathcal{P})$, where $R(s) = w^T \phi(s)$, we compute the posterior distribution over any performance statistic $g(\pi_{\rm eval}, R^*)$ as follows. For each sampled weight vector $w$ produced by Bayesian REX, we compute $g(\pi_{\rm eval}, w)$. This results in a sample from the posterior distribution $P(g(\pi_{\rm eval}, R) \mid D, \mathcal{P})$, i.e., the posterior distribution over performance statistic $g$. We then compute a $(1-\delta)$ confidence lower bound, $\hat{g}(\pi_{\rm eval}, D)$, by finding the $\delta$-quantile of $g(\pi_{\rm eval}, w)$ for $w \sim P(w\mid D, \mathcal{P})$.
|
| 79 |
+
|
| 80 |
+
While there are many potential performance statistics $g$, we chose to focus on bounding the expected value of the evaluation policy, i.e., $g(\pi_{\rm eval}, R^*) = V^{\pi_{\rm eval}}_{R^*} = {w^*}^T \Phi_{\pi_{\rm eval}}$. To compute a $1 - \delta$ confidence bound on $V^{\pi_{\rm eval}}_{R^*}$, we take advantage of the learned linear reward representation to efficiently calculate the posterior distribution over policy returns given preferences and demonstrations. This distribution over returns is calculated via a matrix vector product, $W \Phi_{\pi_{\rm eval}}$, in which each row of $W$ is a sample, $w$, from the MCMC chain and $\pi_{\rm eval}$ is the evaluation policy. We then sort the resulting vector and select the $\delta$-quantile lowest value. This results in a $1-\delta$ confidence lower bound on $V^{\pi_{\rm eval}}_{R^*}$ and corresponds to the $\delta$-Value at Risk (VaR) over $V^{\pi_{\rm eval}}_{R}\sim P(R \mid D, \mathcal{P})$ [@jorion1997value; @brown2018efficient].
|
2004.02958/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:03bb4c50aafcb578859085d862a12ce6b0d89e83343a4dc437b7d391d40588b7
|
| 3 |
+
size 9288712
|
2004.10353/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:9e15c5a3e0c13a3e08d1a334fde6a7951f4ed461498ee593b82034d38452cf6b
|
| 3 |
+
size 356791
|
2004.14243/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7053a01a99e613c960959181f37f996ab3bfd846f49ea1206e0af4a478035aa2
|
| 3 |
+
size 2922111
|
2007.07670/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:4dd6bda6343be6e04aff538e644cb56ee88f5a3f7f8426cce2063b00cb29a92f
|
| 3 |
+
size 535955
|
2007.13143/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:52698fae2daa3d8b8747895310592be275a68a9eeb5ebcb03433f726a0836896
|
| 3 |
+
size 3086303
|
2010.02696/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f37bd1804c0b0c38ac46e2a5922667295484a608500485324b206242e1f6cb30
|
| 3 |
+
size 836909
|
2010.08258/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:f8837459aa8e93c3c16cf177f6766a89b8c8c3cd7e32bbe5b59103bb27a2060d
|
| 3 |
+
size 5989138
|
2010.09345/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d1876b24973ff9506eac3da60305b2b7e28720fdcf5677e958099d34f8fc1ac5
|
| 3 |
+
size 17613394
|
2010.10258/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0ebd077520fec6707b19f9ec057a38773d04c8f63f959b9384a3b1807086ef66
|
| 3 |
+
size 4720004
|
2010.13924/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fec5c54b67f31d6d1588001ce5bc1cbe45280151952a26706279243e5d1e929b
|
| 3 |
+
size 15301611
|
2011.03017/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8f2f7f7f9e66b90ce15299a1f8ea29f91223db508d3f6e52b12bfb2803280d28
|
| 3 |
+
size 1212848
|
2103.16886/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:bf56ae8d9b3eaaba9917e80643e39109143f4c0a41d5442fc47ed93d67067f58
|
| 3 |
+
size 3609623
|
2104.00380/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:cc532de6f824dc25204e7e152facfeb35646aabafdb66c67623fcfae734a16d9
|
| 3 |
+
size 2331797
|
2104.07058/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:72b8204b320d9e64f6278380e448810a6a2f767d16821c71cecacaa662898d41
|
| 3 |
+
size 690443
|
2105.08997/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:d67b0e7e6e772580badce3fd43a7d40c4fc6a81d92279f40c1ae96af9b244a19
|
| 3 |
+
size 17710871
|
2105.09574/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-04-15T12:59:26.418Z" agent="5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/84.0.4147.89 Safari/537.36" etag="K-anpEIFHN0siPcOfmCJ" version="14.6.0" type="device"><diagram id="KgMyl053cNK_uFxMr-Sc" name="Page-1">7Vpdk9IwFP01jPqwTpNCYR93AXXGXXFkR13fAs22WUNTQxDw15vQ9JN0QbeUzsJTm5vkJjnn5DYfbdn92eo9R6F/y1xMW9ByVy170IIQ2B1LPpRlHVm6nW5k8DhxdaHUMCZ/sDbqet6CuHieKygYo4KEeeOUBQGeipwNcc6W+WIPjOZbDZGnW7RSw3iKKN4q9o24wo+sPdhN7R8w8fy4ZeBcRjkzFBfWjuc+ctkyY7KHLbvPGRPR22zVx1SBF+MS1XtXkpt0jONA7FNhIOjw48B9vGnf/Zjc3Y+cr133Aure/kZ0oUeseyvWMQScLQIXKy+gZV8vfSLwOERTlbuUpEubL2ZUZyM+1SR2ZUo7x1zgVWm3QQKGVBFmMyz4WhbRFWzoRFW0gNoazmXKBuhpm59l4lIbkVaAl7hOQZIvGqd/wazTdMygbe3CrF7IQNMRc3YCBpxaEWsbEHOobPb6gckhZaFzfi1YnHEx38ByJQsAJ1ylmfLNU8+hhEmNuw9if7J7kcuowBYxElSRR38uOPuJ+4wyLi0BC7BqnVBaMCFKvEAmp5IELO3XiiIiY+uVzpgR11XNGOlOBWFVwzEozIpLA8mWgWR4KI67pRzPQxT8P8dknuE2cvXSuU1Iagq3vQNx24cnx61tNYxb05rpHJsrjc0AHpvkeANS+QwefQYnN4WL4bkB9JqWpOcAXUmAbgC78ByiDx6iO0dn2T4voA8WoY/PbvkW+PkR2hI+UmFg8zG2+lJIliRHvrVPjvniAZuRedNx0eGYP9TG+BMrhu7ToLhbmNu9bYaTI696GD7U9vi8uDbTW3PoLt8hnxfXz11cH51dWMHWqW1iN/YyiQ1fRqO7V3ItZlH8IC58FLjKOXFxRgWTYu2TUUbhzsL02Y4vI+vRRfmma4ukvXdhRqEkuuBqWCXCyO/KTHp5qcKw4W5l9ECdyijfqFWsjFtEggi+SdICjzNfK928OUuklbm4fCp41PlRseEW2Nj18FgnGRc+81iA6DC1FmBJy9wwFmp+HrEQa31XjBaC5dnDKyK+q+pvOzp1n8kZrLTnTWKdQ1917mns5VjYgk/xHhFTIO5hsbPgNpscUyTI73xPqudmj+kL9po/sjESzstUniGmComDvMQN10awVoWbfhspoAgbj6LpbLdeGJ3dMNrNh9EQcOuFsfyMJUGs3XwYTScZ1cAok+m/eZu8zB+O9vAv</diagram></mxfile>
|
2105.09574/main_diagram/main_diagram.pdf
ADDED
|
Binary file (19.8 kB). View file
|
|
|
2105.09574/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,34 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Method
|
| 2 |
+
|
| 3 |
+
The dataset introduced by Wisniewski et al. [@DBLP:journals/ws/WisniewskiPLK19] (further referenced as [CQ2SPARQLOWL]{.smallcaps}) and their analysis on how CQs are constructed and how they are related to provided SPARQL-OWL queries was our main guide on how to relate CQs to queries. Because in [BigCQ]{.smallcaps} SPARQL-OWL query templates are produced from axiom shapes by applying simple substitutions of IRIs with variables followed by preamble and postamble adding and because the ontologies used to generate axiom shapes are disjoint with ontologies in [CQ2SPARQLOWL]{.smallcaps}, we used all SPARQL-OWL queries from [CQ2SPARQLOWL]{.smallcaps} to measure the coverage of [BigCQ]{.smallcaps} query templates.
|
| 4 |
+
|
| 5 |
+
However, [CQ2SPARQLOWL]{.smallcaps} was used intensively to observe how existing queries relate to real CQs. Because of that, in order to evaluate the coverage of CQ templates we used another dataset of CQs to check how well our CQ templates cover unseen, real CQs. The [CORAL]{.smallcaps} dataset introduced by Fernandez-Izquierdo et al. [@10.1007/978-3-030-21348-0_29] is the biggest compilation of requirements expressed as either CQs or declarative sentences. The dataset overlaps with [CQ2SPARQLOWL]{.smallcaps}, so we used only CQs that were not present in [CQ2SPARQLOWL]{.smallcaps} as an evaluation set to calculate the coverage of CQ templates from [BigCQ]{.smallcaps}.
|
| 6 |
+
|
| 7 |
+
:::: center
|
| 8 |
+
::: {#datasetsizes}
|
| 9 |
+
Dataset Number of CQs
|
| 10 |
+
-------------------------------------------------------------------------- ---------------
|
| 11 |
+
[CORAL]{.smallcaps} (all CQs + sentences) 834
|
| 12 |
+
[CORAL]{.smallcaps} (all CQs) 469
|
| 13 |
+
[CORAL]{.smallcaps} (all CQs that are not in [CQ2SPARQLOWL]{.smallcaps}) 324
|
| 14 |
+
[CQ2SPARQLOWL]{.smallcaps} (all CQs) 234
|
| 15 |
+
[CQ2SPARQLOWL]{.smallcaps} (all CQs with SPARQL-OWL queries defined) 131
|
| 16 |
+
|
| 17 |
+
: The size of requirement datasets involved in [BigCQ]{.smallcaps} construction and evaluation.
|
| 18 |
+
:::
|
| 19 |
+
::::
|
| 20 |
+
|
| 21 |
+
[]{#datasetsizes label="datasetsizes"}
|
| 22 |
+
|
| 23 |
+
In this section we describe the process of transformation of each axiom shape into a pair of CQ template and SPARQL-OWL template. To achieve this goal, we first verbalize each axiom shape to obtain statements which can be easily transformed later into questions. Then each axiom shape is transformed into SPARQL-OWL query templates by adding appropriate preamble and postamble and replacing IRIs with variables.
|
| 24 |
+
|
| 25 |
+
Using ACE verbalizer [^4], we verbalized each of the 239 axiom shapes into natural language statements. An example OWL axiom shape from the dataset:
|
| 26 |
+
|
| 27 |
+
<http://example.ns#OP1> a owl:ObjectProperty .
|
| 28 |
+
<http://example.ns#C1> a owl:Class .
|
| 29 |
+
<http://example.ns#C2> a owl:Class ; rdfs:subClassOf [
|
| 30 |
+
a owl:Restriction;
|
| 31 |
+
owl:onProperty <http://example.ns#OP1>;
|
| 32 |
+
owl:hasValue <http://example.ns#C1> ; ] .
|
| 33 |
+
|
| 34 |
+
is verbalized by ACE verbalizer into `Every C2 OP1 C1` statement.
|
2105.09803/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:8b877d0b5c1919fca686d63978915b77726af9fd706d94051879cc057595b875
|
| 3 |
+
size 12425858
|
2106.06959/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:a5fb504d55e907810633936831508d291208fc64506835f1c3c74019f0d88950
|
| 3 |
+
size 42869370
|
2107.01396/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:638343aa3f0d18db20005864126ca8a8c15aabf1979a8676b897b38005b5f93e
|
| 3 |
+
size 5517144
|
2108.07520/main_diagram/main_diagram.drawio
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
2108.07520/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,42 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Efficient 3D mesh manipulation with high-fidelity generative models is crucial in the computer graphics field and enjoys great potential in various practical applications, ranging from 3D human activity understanding, 3D augmented reality to robotics. In this work, the focus is made to transfer the 3D pose style from the source mesh to the target mesh in an unsupervised manner. Although a few attempts have been proposed for this task, due to several issues, limitations still exist in the state-of-the-art methods.
|
| 4 |
+
|
| 5 |
+
<span id="page-0-0"></span>
|
| 6 |
+
|
| 7 |
+
Figure 1: Visualized latent pose space distribution learned by our IEP-GAN. Dots in each color stands for a latent code of a pose class from FAUST dataset [\[2\]](#page-8-0). We observe the clusters in the latent space fit their pose representations, suggesting that the IEP-GAN has generalized the cohesive ability of projection.
|
| 8 |
+
|
| 9 |
+
To date, strict constraints of re-enforced correspondences of meshes are inevitable in almost all the existing methods [\[26,](#page-8-1) [8\]](#page-8-2). However, to provide those correspondences needs either extra manual efforts or specific requirements for data. Although some existing works claimed that they can achieve 3D pose deformation in an unsupervised setting, a constraint on the training datasets is still needed that different poses performed by the same subject should be given to successfully disentangle the shape and pose information [\[4,](#page-8-3) [39\]](#page-9-0). This constraint actually serves as a strong supervision with manually labeled prior. Thus a real unsupervised setting for pose transfer learning is not achieved in any existing work yet. Besides, compared to 2D images that have fixed rigid permutation [\[29,](#page-8-4) [38,](#page-9-1) [27,](#page-8-5) [28\]](#page-8-6), 3D meshes are embedded in continuous space with arbitrary orders and complex geometric attributes, which makes the task more challenging. This innately structural dif-
|
| 10 |
+
|
| 11 |
+
<sup>\*</sup>Corresponding Author.
|
| 12 |
+
|
| 13 |
+
<span id="page-1-0"></span>ference from 2D images precludes the standard discrete convolution operator to be immediately applicable to 3D meshes/points. A model specifically designed for geometric characters is desirable [\[14\]](#page-8-7). Lastly, intrinsic (i.e., geodesic) distances are powerful metrics for learning latent representations of deformable 3D shapes [\[16,](#page-8-8) [3,](#page-8-9) [2\]](#page-8-0), but conventional geodesic-based methods suffer from the intensive computations, which makes them unsuitable for learning on large-scale datasets.
|
| 14 |
+
|
| 15 |
+
*Learning unsupervised 3D pose transfer without any constraints needed on training sets*, this target drives our current research efforts. To this end, we propose a GAN-based framework motivated by three key observations. First, a variety of deep generative models are proposed to encode and regenerate pose-transferred meshes by disentangling pose and shape information. However, extra constraints on the training data are inevitable in all the existing methods, otherwise the shape and pose latent representations cannot be successfully disentangled, which leads to degenerate solutions. In an ideal case, a model should be able to learn the shape and pose representations without any enforced constraint on training data, see Fig. [1.](#page-0-0) Second, to obtain the geodesic distance priors for the intrinsic preservation of meshes, various differentiable intrinsic metrics are adopted, but they either need intensive computational costs or large-scale training sets [\[11,](#page-8-10) [31\]](#page-8-11). At last, to our knowledge, there is no existing deep generative model proposed specifically for extrinsic information learning on 3D data.
|
| 16 |
+
|
| 17 |
+
In this paper, we propose a novel Intrinsic-Extrinsic Preserved Generative Adversarial Network (IEP-GAN) (see Fig. [2\)](#page-2-0). Two-branch discriminators are introduced to liberate the learning from the needs of data constraints. A global-branch discriminator is introduced to substitute the ground-truth meshes by enforcing generated meshes to converge to realistic ones. Besides, an extrinsic-branch discriminator is incorporated to enhance the pose style learning through the co-occurrence statistics of the Laplacians of the meshes. Furthermore, a geodesic-adaptive sampling strategy is introduced to compute a regional geometric-preserved loss instead of the global geodesic prior, which can form an effective geometric regulation as intrinsic preservation while avoiding the heavy computations.
|
| 18 |
+
|
| 19 |
+
To summarize, the novelties can be listed as below:
|
| 20 |
+
|
| 21 |
+
- To the best of our knowledge, the proposed work is the first that can achieve unsupervised 3D pose transfer without any human supervision, and the intrinsic-extrinsic preserved generative adversarial network is the first GAN-based framework for 3D human pose learning.
|
| 22 |
+
- The IEP-GAN consists of newly proposed two-branch discriminators. The global branch enhances the generative capacity of the model and substitutes the need for the manual encoding of the training data. The extrinsic branch utilizes the Laplacian to strengthen extrinsic geo-
|
| 23 |
+
|
| 24 |
+
- metric learning in a co-occurrence statistical way.
|
| 25 |
+
- At last, a regional geometric-preserved loss is proposed to preserve the regional intrinsic priors via a geodesic-adaptive sampling strategy, which guarantees both an effective geometric regulation and substantial improvements in the computational efficiency.
|
| 26 |
+
- Experimental results on four different datasets, i.e., DFAUST [\[3\]](#page-8-9), FAUST [\[2\]](#page-8-0), ANIMAL [\[26\]](#page-8-1), and MANO [\[23\]](#page-8-12), show that the proposed method achieves new state-of-the-art performance with satisfying visual quality. Moreover, we further demonstrate the possibility of using the resulting embedding space for 3D human mesh manipulation, such as smooth pose transfer, interpolation and swapping of the given shapes.
|
| 27 |
+
|
| 28 |
+
# Method
|
| 29 |
+
|
| 30 |
+
This section first presents a general introduction of the whole IEP-GAN framework. Then, each component of IEP-GAN and their functions will be indicated. At last, designing details of the network will be presented.
|
| 31 |
+
|
| 32 |
+
The encoder E used for processing the input meshes is the classical PointNet [20]. Besides, each convolutional layer is followed by an InstanceNorm layer instead of a batch normalization operator to preserve the instance features, which is critical for the style transfer task. The pose code
|
| 33 |
+
|
| 34 |
+
<span id="page-5-2"></span><span id="page-5-0"></span>Table 1: Comparison with state-of-the-art methods on the FAUST and DFAUST datasets. Interpolation error measures the intrinsic preservation ability. Disentanglement error measures the extrinsic preservation ability. Results with the best performance are in bold font. Results of our method are marked with underline. Note that our IEP-GAN is trained without the need of data constraints which is used in the compared methods.
|
| 35 |
+
|
| 36 |
+
| Interpolation Error | | | | | Disentanglement Error | | | |
|
| 37 |
+
|---------------------|---------|--------------|--------------|----------------|-----------------------|--------------|--------------|----------------|
|
| 38 |
+
| Method | VAE [1] | LIMP Euc [4] | LIMP Geo [4] | IEP-GAN (Ours) | VAE [1] | LIMP Euc [4] | LIMP Geo [4] | IEP-GAN (Ours) |
|
| 39 |
+
| FAUST | 3.89e-2 | 5.08e-3 | 3.82e-3 | 4.02e-3 | 7.16 | 4.04 | 3.48 | 0.19 |
|
| 40 |
+
| DFAUST | 9.82e-2 | 3.43e-3 | 2.89e-4 | 3.16e-4 | 6.15 | 4.90 | 4.11 | 0.34 |
|
| 41 |
+
|
| 42 |
+
is fixed as 512 dimensions and the shape code is the raw input shape mesh for the pose transfer task and 2048 dimensions for the disentanglement. For the generator, we fuse the architecture of [\[17\]](#page-8-13) and [\[31\]](#page-8-11). In particular, we adopt the GAN framework and network structure of [\[17\]](#page-8-13), and change its Residual blocks into the SPAdaIN ResBlock introduced in [\[31\]](#page-8-11) which is specifically designed for 3D style transfer task. The discriminator structure is a symmetry of the generator except that the last two layers of the discriminator become fully-connected layers to produce the prediction of real or fake meshes. The general structure of the Laplacian co-occurrence discriminator is identical to [\[17\]](#page-8-13) while we replace its encoder with ours mentioned above for 3D meshes. Each Laplacian will be down-sampled via the Poisson Disk method [\[36\]](#page-9-5). Please see Supplementary Materials for a detailed specification of the whole architecture, as well as details of the hyper-parameter settings.
|
2109.04518/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7af82a04153f18ddb514ff09b448972710558dd42a263902d2325ead685ec734
|
| 3 |
+
size 3596115
|
2109.04712/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:af3f8293890b25f323267fd121e22c4e42c6498c4768dcd17be236d08b880d7d
|
| 3 |
+
size 694401
|
2109.05361/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:1413b965bb6597d21c85b65a0a594951d68c5388c89f73ae070109ea62e1c04e
|
| 3 |
+
size 764791
|
2109.11593/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:89297bf2af989e2ce834b64d07b60e558eb12e582c432de90bf7957291cb567b
|
| 3 |
+
size 5602793
|
2110.05055/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:b516b07e1eb098e6ed8d396464d3e7deb8b33067e5384ee4c67c23a9ea6dc88c
|
| 3 |
+
size 1765511
|
2110.05419/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:20337a9826d0f316f5d08fd7869476d8039c3b14246529c2046e797221d01e4f
|
| 3 |
+
size 655627
|
2110.06176/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-05T16:12:10.287Z" agent="5.0 (X11; CrOS x86_64 14092.66.0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/93.0.4577.95 Safari/537.36" version="15.4.1" etag="cyYqpM4YxWW867R-6XBz" type="device"><diagram id="IhPWtElp5GIzmKqWgod-">7V1bk5s4Fv41rtp5iEvizmNfJ1ubnklVpiazj7Qt22xj8AKdbs+vHwkkLpIw0JaI3W0nlYAQAvSdc3T06RyYmTfb11/TYLd5SJYomhlg+Tozb2eGYZiei/8jJfuyBEIDlCXrNFzSsrrgW/g3ooWs2nO4RFmrYp4kUR7u2oWLJI7RIm+VBWmavLSrrZKofdVdsEZCwbdFEIml38NlvilLPRvU5Z9RuN6wK0NAj2wDVpkWZJtgmbw0isy7mXmTJklebm1fb1BEeo/1S3nefcfR6sZSFOdDTjDKE34E0TN9Nnpf+Z49bJo8x0tE6oOZef2yCXP0bRcsyNEXjC8u2+TbCO9BvJnlafJUdYqFS5ZhiiEIk5jURxm+q+tVEucUVOjS/ZskStLigqYHyB9yapBtigtDuvM1yHOUxkWJAerryU6mD4bSHL12dg6suhwLK0q2KE/3uAo9wbXLM6iYGsChqL3UoJsmrbRpAu6bVNiooK2rtmss8AaFQw6NOQYa2A8N6eX7YBtG5HFukuc0RClu7jf0wkHiiZD4xY+Uh1HEyuMkRmNQakpFkC7o5cBBFIv7og8MFEBa6SgzPb5nCpharFILU2a1jsHUerfqphoox2gDZQATCkBBzxeBqozaMUDZEqCciPVmCzHn/88JO/ApK/r5inSXv3stzBA7jrfW5P/vm4A0EGZk1NqgGXnQsunHo9rlbnBRgVRXNE3kLP1Fs+iYC9rVNbNdEB/VVhwQmQ2iMN/P7FvWLEapbLn9hLi4fMiqONgSxYgfs1272iOrk6yE3p6un9TAq7C3NyhNBnXz48iuf9kk+LpPeJDAhuQ9dPWwPlCDypn21gNaPmdBW5x6+8y8H6vj3DCJR5a8PRZi47EmY9UCjzDYszGvyfgTYqf9ih7YhsslOf06RfixgseIuR+7JIzzYrDAwOPnwG0950lGh8z+QdXnxj9DwfjnGXDOuZ8mEN1P25C4KiY4fgB0OgfAw3K+oh7mVan+pXWgFRaV0xljp5M7SOoX//Jy52K56xTdpuj8kQZxtkrSLUqvo2Tx1C9h7eJjHLGmX0z85asb9/pe5jCB4tcvU+6M9+7gTIc77HHusC9xh01DlDFHgYy5qmc4bRCui99BEBLcFnE5sO98Opi4vOfrAGYKGqiwuURL8X1W8RhYvAssElh84LwVFkuBrvgXUGSgWPBngsL4SF2ouO7V1e3t+aHiG0NQMSWuixJU4EVXJKhAYA+CRZuyjOKaPxAsjIH8SbAo55lH27DTwAHCQQO8Nqs1ihv+OOoBnZ+rHjIm+AILNMCbZylKYJHxE2dgtTgs1cPiDIJFmxGTTelbrOaqBKBmdNorkeOIzcHU0L/jMA+DaDxDdJCD/CAMEc9DGowPanFEEolSwRHBCxsxiCSCknVsbcb3QkYMYoigJyqKLkwMzVTEuWLCE0RT6omhmYh4L/SQDBNdHgqD4KInPeTQpIqimYM4W1B4amhSUAYQEmi5RuyJkzTfJGsSxHNXl163feS6zpck2dFu+x/K8z3tWbLwPlMWtlhFp6F4eUXifnHxIgqyLFz8sQnj8sB9GLFrYVzS/V/kXrHPS3f/S2+92Ll9be3tZ834xR5RIH3VEoQMPwydDFUBZ3mQrhEFsQ4n7hWYFEVBHv5oty8DvzgVd0Wwb1Sg8Q91y19JQZODMYVRlAsrFs8Q/FPACV95F7UoVo8zTDo18zLnS1tOOpp
qpmHO1XALpOWkhvsSVzGMspwUFM08xpnOBUTCclLzJSMyFEdzHh2K+hWlGXrOOgMswUtAQsmDGcmWQkVs+a8pQk9Ff6UkHnc+nw8NeusOq4zQihw576BKm+cygeOK0iYTNxUxlayNE3OgZXk+Qx3m1zD/i7nEeLtwlueW69P92l8mO/vGDpbqEHcgCdS9rdO1DvvImtxhwXu1BHLV4doob4uedsAPdrDygcbP7mm3dP+Fdt/gH5si91SYAVXJJnZf3Dc1S1kUhMSQPhJcmCn7YFbH4a0OtBzJGKfL6pzmtP0Iq9NhK/Qbhh51FlrsUGfR4EDQ01KHwXmLYZBxbYpMwmd8/Fu43WVYvTrtQsi8lVW4KJO+8PZiE6TBIi9kpLBTH8tCCH6JASQWwtJlIYYwfbVq0rzfBhb99kScSdQqXTJoQZrLdb841NB+fpJS+SBz17MafsgnoquswnBHZCxzZw0Gu5m2I0GSlR1po2zHnxuWX/2ExXv/ba4MBFgzDWEg41rrsHhjGUfH4QhHo4dw9Hi+cfQJ7FFUEZRmbwqxylAYLIBDs6R+f7ijwS+4Lwzw5f0k4hlaQqog5HOkbJGhgK4u2+y8N+9NPmd0zbOeMvZN9CCc+75glEf7icLElAXVa/ATu0P5jubGqnmghLtS5oyWJFinG3rhxvpmqcCXcGO68o1NkR8v+QpKM8j4BPlrH7QTrvxlP5aYCFMV6EqmKlCXmPgnORyyHjdm/ALMEq2C50KIxBkUPwU6NoiAzQ/1D3eu73NC4HBvvFLHZLKn0rVydnfneY4jADdypX+TpOHfuCyQv5pJx9qZyzmmBnBFx9SRKKKhQBFZXOUJK+JAf7RLXyWUhdRztQ86rqN1eDKXFeJBUOaUVirN2+uhXqrrugfbZZluPT7rWM5AtEn0Qp33KZxgqo1RYlietuWawlDxHQ2s6QyVeU6G6oBncCoWynZP3suQmCRuqIQsBl39xNkS2fRxKxrTv8DK1aD0jmUJMwVDVHtZrIUStRfp14+IgmvzgYhTYtD9EjFdFPihSKyhtFVnQw+40zF6D2ibkI640OaH1J8XPCB5e5021twSyctT134dZE1F4tbaPyy4UgkGsojXE8gFhzZ4wCd8D5/CHVqGhFokep2TftuWGp51cYvnr+xVmozajD5O101R17VFVVo/hRJ8a6rQlOlMxzOKLCahmZbEXhik0v8f/FpuGRd42KbwLx3VYlNKl+Aqz0v78SXYkyteXi7Rm+DIv+W9+hJIMyXDAHP2KQbVGQC25hRtnRSN7DLqZ3GGsCzoGqJxr148qnoOwdKSTxugLjj0q5AEIKxVUwKkOXd7aoDqLClby7yIkcIVWvakaGl+y9x7VCdP/PCIRoA05zW/d3Wq3v46DVqa8591uQ+aURFWpmWo6FrwsTWnP5/Pqhu/5iExZNpA0Jzu/F4UQzb6a8OkO9t5NCF3cCr8H3RW7Lj2lTBDnENZ4jTX08SPOeM5k7fB/mdxgQvwNfACLyqBHerK+2QpNCcW+HBM5gDjXCHL7KpY17nrVDRsB/Na7PE5BHXyGADGrBk04XlsX0dGQpNQHZ5BdmySgsWxbcAz5t4bQyqEXARZY+qiKhzRfzh3aW6IntkSPdM4HLFz7skwPlAndbLGFEpdd7K0bHRTMp5K1iXopwDJmkS6fvwX7gL8F988aGz9Mhv1AbWvKfqUp0EYkw81HhyyH9Op7onrxkmuSWM67uJFsmyt2bTdlL5ihV+Sa5qczyj6gYi3w3krtmh+BsyIJHNOflZ1e+uRhcd1GixDVLdPwxJVO0iWMCBJ31YPpd9j9sf7SHi3/rR6aQ7qL9Sbd/8A</diagram></mxfile>
|
2110.06176/main_diagram/main_diagram.pdf
ADDED
|
Binary file (72 kB). View file
|
|
|
2110.06176/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,89 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
Neural models have greatly advanced the state of the art in natural language processing and generation tasks. Accordingly, there has been increasing interest in applying neural language models to tasks which require extensive world knowledge to solve (Petroni et al., 2021). Much of this world knowledge can be found distributed over text corpora, which raises the question whether language models pre-trained on text corpora capture this information. Recent work suggests that while language models may successfully predict facts about the world (Petroni et al., 2019) such knowledge is superficial and unreliable (Cao et al., 2021). Our goal is to reliably incorporate information from across a text corpus into a language model.
|
| 4 |
+
|
| 5 |
+
Recent work has represented the information present in a text corpus explicitly by constructing a virtual knowledge base (VKB) (Dhingra et al., 2020; Sun et al., 2021). A VKB consists of dense representations of entity mentions in the text, designed to reflect the property or relation expressed by the entity mention. We propose to incorporate a VKB into a language model by using it as an external memory, performing attention over the entire VKB *within* a Transformer model. In this way the model can synthesise and reason over many disparate sources of information from the text corpus. We refer to the VKB used in such a way as Mention Memory, and the model as TOME (Transformer Over Mention Encodings). We first pre-train a mention encoder to specifically encourage mention representations that are useful for a Transformer model, and construct a Mention Memory from 150 million entity mentions in English Wikipedia. Then we train a TOME model with attention layers over the Mention Memory, which is kept frozen (see Figure 1).
|
| 6 |
+
|
| 7 |
+
We argue that the Mention Memory approach has several appealing properties. First, TOME retrieves entity mention representations corresponding to specific entity attributes or relations described in the
|
| 8 |
+
|
| 9 |
+
<sup>\*</sup>Equal contribution. Correspondence to {msdejong,yury.zemlyanskiy}@usc.edu. Work primarily done at Google Research. Code is released at https://github.com/google-research/language/tree/master/language/mentionmemory
|
| 10 |
+
|
| 11 |
+
<span id="page-1-0"></span>
|
| 12 |
+
|
| 13 |
+
Figure 1: Overview of Mention Memory. A pre-trained mention encoder is used to generate dense representations for each entity mention in Wikipedia (approximately 150 million total) which are stored in a table. The TOME model takes a passage annotated with entity mention boundaries as input, and applies a Transformer block. Next, the TOME model applies one or more TOMEBlocks. Each TOMEBlock contains a memory attention layer and a Transformer block.
|
| 14 |
+
|
| 15 |
+
corpus. This retrieval is much more fine-grained than aggregate entity retrieval methods such as Entities as Experts (EaE) [\(Fevry et al., 2020\)](#page-9-2), and we show large improvements in accuracy over EaE on tasks that require detailed entity information, such as claim verification and entity-based question answering. The fine-grained retrieval also allows potential users to see more precisely what knowledge the model's predictions are based on (see Table [4\)](#page-8-0). *Second*, TOME retrieves dense representations, which are easy to incorporate into a Transformer model without reprocessing the input, unlike raw text. Therefore, TOME is able to retrieve, assimilate and reason over information from many different sources within a *single* Transformer model, allowing for multi-source and multi-hop reasoning without the beam search machinery that is required for multi-hop retrieve-and-read [\(Zhao](#page-10-3) [et al., 2021\)](#page-10-3). This also makes TOME much more scalable: retrieve-and-read approaches have to read many retrieved passages, which becomes expensive with larger reader models, while the cost of memory layers does not scale with reader size and is negligible for larger readers. *Third*, the retrieval is latent, without direct or distant supervision on the retrieved results. We show that, even without supervision, the model learns to retrieve highly specific and informative entity attributes and perform multiple reasoning steps. *Finally*, the memory table is semi-parametric, so knowledge can be added or updated by applying the mention encoder to new text without retraining.
|
| 16 |
+
|
| 17 |
+
In order to verify the model's capacity to capture accurate factual information in the corpus, we start by evaluating TOME on the HoVer [\(Jiang et al., 2020\)](#page-9-3), FEVER [\(Thorne et al., 2018\)](#page-10-4) and FM2 [\(Eisen](#page-9-4)[schlos et al., 2021\)](#page-9-4) claim verification datasets, on which it strongly improves performance over entity aggregate and comparable retrieve-and-read baselines. We demonstrate that the model learns to attend to informative mentions for verifying claims using only the verification accuracy as a signal. Ablations show the memory is crucial for performance, and that the model can effectively use larger memory than it was pre-trained on. In a second set of experiments we evaluate TOME on question-answering benchmarks TriviaQA [\(Joshi et al., 2017\)](#page-9-5), ComplexWebQuestions [\(Talmor &](#page-10-5) [Berant, 2018\)](#page-10-5) and EntityQuestions [\(Sciavolino et al., 2021\)](#page-10-6), improving performance over comparable baselines. Finally we show that the model can be adapted to generalize to new unseen entities by updating the memory, without retraining.
|
| 18 |
+
|
| 19 |
+
# Method
|
| 20 |
+
|
| 21 |
+
Our method represents knowledge in a corpus as a collection of "*mention encodings*" – dense vector representations for every entity mention that appears in the corpus. Every time an entity appears in a passage – "*[Barack Obama] was elected president in 2008*" – some property of the entity or its relation to other entities is described. The first component of our method, the Mention Encoder model, is responsible for distilling information from entity mentions in the corpus into high-dimensional mention encodings. We use the Mention Encoder to encode each entity mention in English Wikipedia and gather encodings into a *Mention Memory*. The purpose of the Mention Memory is to capture all knowledge contained in the corpus in a way that can be easily integrated into a Transformer. The second component of our method, the TOME model, applies sparse attention over the Mention Memory to incorporate external information from the corpus into a Transformer model. An overview of the whole method is shown in Figure [1.](#page-1-0)
|
| 22 |
+
|
| 23 |
+
Jointly training the Mention Encoder and TOME models is computationally costly, since it would require backpropagating through the Mention Encoder for each attended mention. Consequently, we propose to train the models in two stages. First, we pre-train the Mention Encoder and generate the Mention Memory. Second, we pre-train the TOME model while keeping the Mention Memory frozen: the gradient does not propagate through it and the memories are not modified. Mention Encoder pre-training is specifically designed such that mention encodings capture relevant contextual information about each mention and are useful for TOME even without joint training. We formally define these models in sections 2.1 and 2.2, and their pre-training procedures in 2.3 and 2.4.
|
| 24 |
+
|
| 25 |
+
**Notation.** An input to the model is a passage $\mathbf{x} = x_1, \dots, x_T$ of length T. We assume that each passage has been annotated with an NER system. Following Baldini Soares et al. (2019) we use special entity markers to highlight entity mentions in the passage. We introduce tokens $[E_{start}]$ and $[E_{end}]$ to the vocabulary and insert them before and after each mention in the passage. For example, the original passage "What is the nationality of the hero who killed Medusa" turns into "What is the $[E_{start}]$ nationality $[E_{end}]$ of the $[E_{start}]$ hero $[E_{end}]$ who killed $[E_{start}]$ Medusa $[E_{end}]$ ". Each mention m in a passage is described by a tuple (s,e), where s and e are start and end positions of the mention. We consider entity markers to be part of the corresponding mention, so that $x_s = [E_{start}]$ and $x_e = [E_{end}]$ . Representations of these tokens are later used to generate mention encodings.
|
| 26 |
+
|
| 27 |
+
Let $H \in \mathbb{R}^{T \times d}$ be token representations where d is the hidden dimension, such that $H_i \in \mathbb{R}^d$ is the contextualized embedding for the i-th token. Following Févry et al. (2020) we compute the encoding of a span (s,e) as a learnable linear projection W of the concatenation of its start and end token representations $H_s$ and $H_e$
|
| 28 |
+
|
| 29 |
+
$$SpanEncodingLayer(H, (s, e)) = W[H_s; H_e]$$
|
| 30 |
+
(1)
|
| 31 |
+
|
| 32 |
+
The Mention Encoder is a Transformer model with two final SpanEncodingLayers that produce key and value mention encodings. Value mention encodings store context-level information about each mention and are used as inputs to the TOME model. Key mention encodings identify the type of information stored in the value encodings and serve as attention keys for the memory layer. These two SpanEncodingLayers do not share weights.
|
| 33 |
+
|
| 34 |
+
After the Mention Encoder is pre-trained (see section 2.3), we use it to generate a Mention Memory from entity mentions in Wikipedia. While we could include encodings of any corpus mention in the Mention Memory, we focus on grounded mentions which can be linked to Wikipedia entities. We denote these as linked mentions, which we hypothesize contain information that can be retrieved and grounded. We gather mention encodings into matrices MemKey $\in \mathbb{R}^{N \times d_K}$ and MemValue $\in \mathbb{R}^{N \times d_V}$ , where N is the total number of linked entity mentions in English Wikipedia (approximately 150 million) and $d_K$ and $d_V$ are dimensions of key and value encodings. Additionally, we record entity (Wikipedia) IDs of mentions in MemEnt $\in \mathbb{R}^N$ , which we use as labels for auxiliary losses, not as inputs to the model or supervision on retrieval. MemKey(i), MemValue(i), MemEnt(i) correspond to the key encoding, value encoding and entity ID for the i-th linked mention in Wikipedia.
|
| 35 |
+
|
| 36 |
+
The TOME model incorporates information from a text corpus into a Transformer by applying sparse attention over the Mention Memory. The model consists of one or more TOMEBlocks, each containing a memory attention layer followed by a post-processing Transformer block. Memory attention layers retrieve and attend to relevant "memories" for every mention in the input passage. The model then processes the retrieval-augmented representation with the Transformer block, allowing it to access and combine information from multiple sources in the corpus. Finally, multiple TOMEBlocks enable the model to refine retrievals and perform multi-hop reasoning. More formally, a TOMEBlock receives the output representation of the previous layer H and produces new representations H'
|
| 37 |
+
|
| 38 |
+
$$M = MemoryAttention(H), (2)$$
|
| 39 |
+
|
| 40 |
+
$$H' = \mathsf{TransformerBlock}(M) \tag{3}$$
|
| 41 |
+
|
| 42 |
+
The TOME model encodes input passages x with the word embedding layer and initial Transformer block and then applies one or more TOMEBlocks
|
| 43 |
+
|
| 44 |
+
$$H^0 = Initial Transformer Block(Token Embedding(x)),$$
|
| 45 |
+
(4)
|
| 46 |
+
|
| 47 |
+
$$H^l = \texttt{TOMEBlock}_l(H^{l-1}), \ l = 1 \dots L$$
|
| 48 |
+
(5)
|
| 49 |
+
|
| 50 |
+
In this work we consider two configurations of the TOME model: TOME-1 and TOME-2, with one and two TOMEBlocks respectively. Each TOMEBlock of TOME-2 contains half as many Transformer layers as in TOME-1 to hold the total number of Transformer layers fixed between models.
|
| 51 |
+
|
| 52 |
+
Each memory attention layer is implemented as a sparse dot-product attention layer that takes the output H of the previous Transformer block, incorporates information from the Mention Memory, and returns a representation M (omitting layer indices). Consider a mention m that starts at position s and ends at position e. We start by computing its *query mention encoding* Query(m) by applying a SpanEncodingLayer
|
| 53 |
+
|
| 54 |
+
$$\mathtt{Query}(m) = \mathtt{SpanEncodingLayer}(H, (s, e)), \tag{6}$$
|
| 55 |
+
|
| 56 |
+
Query mention encodings are used to retrieve relevant memories from the Mention Memory table. However, applying standard attention over 150 million mention encodings is infeasible. Instead, we first perform approximate nearest neighbor search to retrieve the top-K mentions with the largest dot product between query Query(m) and key mention encoding from MemKey. We denote the set of these memories as TopMem(Query(m)). We compute attention over these memories and incorporate the result into the token contextual representation at position s
|
| 57 |
+
|
| 58 |
+
$$\alpha_i \propto \exp(\mathsf{Query}(m) \cdot \mathsf{MemKey}(i)), \ i \in \mathsf{TopMem}(\mathsf{Query}(m))$$
|
| 59 |
+
(7)
|
| 60 |
+
|
| 61 |
+
$$Value(m) = \sum_{i \in TopMem(Query(m))} \alpha_i \cdot MemValue(i)$$
|
| 62 |
+
(8)
|
| 63 |
+
|
| 64 |
+
$$M_s = \texttt{LayerNorm}(H_s + W_U \texttt{Value}(m)) \tag{9}$$
|
| 65 |
+
|
| 66 |
+
where $W_U$ is a learnable matrix of shape $d \times d_V$.
|
| 67 |
+
|
| 68 |
+
Approximate nearest neighbor search (ANNS) can be performed cheaply using one of multiple ANNS libraries, for example ScaNN [\(Guo et al., 2020\)](#page-9-7). We implemented two on-device search methods to avoid the engineering complexity of real-time communication with an ANNS server, though we have verified this is also viable. The first naively computes a simple dot-product between passage queries and memory keys, and was used in our main experiments as it was easiest to implement. We also implemented and will be releasing a much faster version based on CPU ANNS methods. The memory is sharded over devices, so that the device-memory overhead is negligible.
|
| 69 |
+
|
| 70 |
+
Holding the number of entries in memory fixed, the compute cost of retrieval from memory does not grow with the size of the reader or the dimensionality of the memory values, so that the relative cost of the memory layer becomes smaller with reader size. In particular, the overhead from the memory used in our pre-training setting is small for BERT-Large and up. More details on ANNS implementation and overhead can be found in Appendix [C.](#page-13-0)
|
| 71 |
+
|
| 72 |
+
While backpropagating through a Wikipedia-scale mention memory is challenging, it is possible to train smaller-scale memory architectures end-to-end. We take an approach inspired by MARGE [\(Lewis et al., 2020a\)](#page-9-8) and READTWICE [\(Zemlyanskiy et al., 2021\)](#page-10-7) which apply crossattention over documents within a batch. In particular, we process passages in each batch twice. As a first step, the Mention Encoder model generates mention encodings from each passage and aggregates the mention encodings into a batch-wide memory table. In the second step, we apply a TOME architecture that attends to the batch memory, which we call BATCH-TOME. Note that BATCH-TOME is just used for pre-training the Mention Encoder and not evaluated on any downstream tasks. Mention Encoder and BATCH-TOME are jointly trained end-to-end so that the Mention Encoder is encouraged to produce mention encodings that contain useful information for BATCH-TOME.
|
| 73 |
+
|
| 74 |
+
We want to make sure the batch memory contains relevant mentions, so we pre-train the models on batches of passages constructed from related Wikipedia articles with high entity overlap. Appendix [A.1](#page-11-0) provides more details on Mention Encoder data generation. We use the pre-trained Mention Encoder to construct the Mention Memory table from corpus, and use the BATCH-TOME model as the initialization point for TOME-specific pre-training (described in Section [2.4\)](#page-4-0).
|
| 75 |
+
|
| 76 |
+
Masked language model. Our primary pre-training objective is the standard masked language modeling task, with the loss computed based on the output of the second read (BATCH-TOME). To encourage the model to rely on memory, we increase the task's difficulty relative to standard BERT pre-training by masking entity mention tokens more aggressively.
|
| 77 |
+
|
| 78 |
+
Coreference resolution. We wish to encourage the Mention Encoder to represent the entity attributes expressed by entity mentions, so we also employ an entity-oriented pre-training task to the output of BATCH-TOME for which such attribute information is likely to be especially helpful. Unlike Entities as Experts [\(Fevry et al., 2020\)](#page-9-2), BATCH-TOME does not use entity embeddings, so we cannot use the entity linking task. Instead, we apply a related entity coreference resolution objective, which asks the model to predict whether two linked mentions correspond to the same entity based on the similarity of their encodings. Given that entity surface forms are frequently masked, the model needs to instead use the properties of other mentions in the batch to determine which entity it is most compatible with, incentivizing the Mention Encoder to encode such properties. We compute a coreference mention encoding for every linked mention in the batch by applying a separate SpanEncodingLayer on the output of BATCH-TOME. The loss is implemented using cross-entropy over dot-product similarity scores. See Appendix [A.2](#page-11-1) for details.
|
| 79 |
+
|
| 80 |
+
As TOME attends to the full Mention Memory instead of in-batch memory, we do not employ the batching procedure from Mention Encoder pre-training, instead sampling Wikipedia passages randomly. For the same reason, we replace the in-batch entity coreference objective by Mention Memory entity coreference, in which the model has to predict which mentions from the Mention Memory share an entity with the input mention. The goal of this auxiliary objective is to incentivize the model to learn to retrieve informative mention encodings to solve the semantically challenging task. Mention Memory entity coreference also allows us to solve tasks like TriviaQA or ComplexWebQA without a decoder by directly predicting the answer entity.
|
| 81 |
+
|
| 82 |
+
Entity prediction. Analogous to the batch coreference resolution loss, we compute a mention encoding $z_m$ using the output of the TOME model. As in section [2.2,](#page-2-1) $\mathrm{TopMem}(z_m)$ returns the top $K$ memories with the largest dot product between the mention encoding $z_m$ and the key mention encodings MemKey from the Mention Memory. The score $\mathrm{EntProb}(m, j)$ of entity $j$ equals the sum of attention weights of memories corresponding to this entity.
|
| 83 |
+
|
| 84 |
+
<span id="page-4-1"></span>
|
| 85 |
+
$$\mathtt{EntProb}(m,j) = \frac{\sum_{i \in \mathtt{TopMem}(z_m)} \exp(z_m \cdot \mathtt{MemKey}(i)) \cdot \mathbb{1}\{\mathtt{MemEnt}(i) = j\}}{\sum_{i \in \mathtt{TopMem}(z_m)} \exp(z_m \cdot \mathtt{MemKey}(i))} \tag{10}$$
|
| 86 |
+
|
| 87 |
+
The final entity prediction is $\arg\max_j \mathrm{EntProb}(m, j)$. The entity prediction loss $L_{ep}(m)$ for a mention $m$ of entity $\mathrm{Ent}(m)$ is $L_{ep}(m) = -\log \mathrm{EntProb}(m, \mathrm{Ent}(m))$. The total loss equals the average loss over linked input mentions for which at least one memory of the same entity is retrieved.
|
| 88 |
+
|
| 89 |
+
Disallowed same passage retrieval. For each passage in the pre-training corpus, there exist memories corresponding to mentions in the passage generated from the unmasked version of the same passage. In order to prevent the model from 'cheating' by attending to such memories, we set the attention weight for all memories from the same passage to zero.
|
2110.07679/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-10-14T12:32:59.970Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36" etag="3MtVBP4SYkeSdQJZYq4y" version="15.5.2" type="device"><diagram id="4Qrbrz0dLJgbg5uPQ3XI" name="Page-2">3VdNc5swEP01e0wGJD6kI2BoL+3Fh5wJqIYJRo5Qaju/visBwW5ikjJJO6knM4bH6q3Q7r74AU22hy8q31XfZCkaIE55ALoCQlzCGX4Z5NgjjHo9sFF12UPOBKzrRzGsHNGHuhTdgPWQlrLR9e4cLGTbikKfYblScn8e9kM25RmwyzfibBsGWBd5I56F3dSlroa3IOGEfxX1phozuwHvn2zzMXgg7qq8lPsTiKZAEyWl7q+2h0Q05vDGc3mk7e2K3q/VvZtk392Vv45urnqy7E+WPL2CEq1+X2raU//Mm4fhvCCMgQRAKBaQxpAQiJDXmS5IcP9gXjnOy1KJrpsAoNFZgLlJOfAVcA6pD1EKzF6wBOII0hB4BjG1MSFEDFIPYgYMVzGIEcloENj4zNye8CZv3aIS+cz+kNrkI0PiaGWRDBhdkgwP+mIqdxFhq4Vqhb5MexTdEuI234q5Y8F6+MBjUwaGf6slObBb7+p28+5731Wyndm8Q9wr5vk04LjzJfSy0wWK4OUMxDGfRdyqLuaIXY/abvcXNqDlV3m7mS1uALzveZwxbHvfzmEEzFuSsiukmsnGrxc1fqdzdZnUX0Kp8zvR3UppenJGshY2pT7uXhkoLConVl4iI3YnOV5IEK4upX5/cabAE7uv0Aj1KLw+s1iCbeH+HwpMPkaBW/kRAtz/H4zMsMa2NHgqWClmaxE79hEijtFmI9KhGWWcY3zLOD0JftZ3vbxEtuJRZsI+mbT7PqPE559M2r3Q+wvK7pni4/D0P7XMWH+csrPrRXIwr+yLNvpvlT0w48jd4czx8N+k7NZO6OPoUbQ4mBX7qtZivcsLA+/RjiFW6W2Dd64ltD/VhcLw37zNK8bAfXIraPOE3AqtjrhuZHH9nmdweG44OKD95JfI6PqqE6/kBWTwaYNH2zxxTz4ELwYrMt5Ojsk+O/GdNP0F</diagram></mxfile>
|
2110.07679/main_diagram/main_diagram.pdf
ADDED
|
Binary file (17.3 kB). View file
|
|
|
2110.07679/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,69 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
One of the fundamental objectives in pursuit of artificial intelligence is to enable machines with the ability to intelligently communicate with humans in natural languages, with one of the widely-heralded applications being the task-oriented dialogue (ToD) systems [\(Gupta et al.,](#page-9-0) [2006;](#page-9-0) [Bohus and Rudnicky,](#page-8-0) [2009\)](#page-8-0). Recently, ToD systems have been successfully deployed to assist users with accomplishing certain domain-specific tasks such as hotel booking, alarm setting or weather query [\(Eric et al.,](#page-9-1) [2017;](#page-9-1) [Wu et al.,](#page-10-0) [2019;](#page-10-0) [Lin et al.,](#page-9-2) [2020;](#page-9-2) [Zhang et al.,](#page-10-1) [2020\)](#page-10-1), thanks to the joint advent of neural networks and availability of domain-specific data. However, most existing ToD systems are predominately built for English, limiting their service for *all* of the
|
| 4 |
+
|
| 5 |
+
world's citizens. The reason of this limitation lies in the stark lack of high-quality multilingual ToD datasets due to the high expense and challenges of human annotation [\(Razumovskaia et al.,](#page-9-3) [2021\)](#page-9-3).
|
| 6 |
+
|
| 7 |
+
One solution to this is annotating conversations in other languages from scratch, e.g., CrossWoZ [\(Zhu et al.,](#page-10-2) [2020\)](#page-10-2) and BiToD [\(Lin et al.,](#page-9-4) [2021\)](#page-9-4). However, these methods involve expensive human efforts for dialogue collection in the other languages, resulting in a limited language/domain coverage. The other major line of work focused on translating an existing English ToD dataset into target languages by professional human translators [\(Upadhyay et al.,](#page-10-3) [2018;](#page-10-3) [Schuster et al.,](#page-10-4) [2019;](#page-10-4) [van der Goot et al.,](#page-10-5) [2021;](#page-10-5) [Li et al.,](#page-9-5) [2021\)](#page-9-5). Despite the increasing language coverage, these methods simply translated English named entities (e.g., location, restaurant name) into the target languages, while ignoring the fact that these entities barely exist in countries speaking these languages. This hinders a trained ToD system from supporting the real use cases where a user looks for local entities in a target-language country. For example in Figure [1,](#page-1-0) a user may look for the British Museum when traveling to London (A.), while look for the Oriental Pearl Tower when traveling to Shanghai (B.).
|
| 8 |
+
|
| 9 |
+
In addition, prior studies [\(Cheng and Butler,](#page-8-1) [1989;](#page-8-1) [Kim,](#page-9-6) [2006\)](#page-9-6) have shown that code-switching phenomena frequently occur in a dialogue when a speaker cannot express an entity immediately and has to alternate between two languages to convey information more accurately. Such phenomena could be ubiquitous during the cross-lingual and cross-country task-oriented conversations. One of the reasons for code-switching is that there are no exact translations for many local entities in the other languages. Even though we have the translations, they are rarely used by local people. For example in Figure [1](#page-1-0) (C.), after obtaining the recommendation from a ToD system, a Chinese speaker traveling to London would rather use the English
|
| 10 |
+
|
| 11 |
+
<sup>∗</sup>Bosheng Ding is under the Joint PhD Program between Alibaba and Nanyang Technological University.
|
| 12 |
+
|
| 13 |
+
<span id="page-0-0"></span><sup>1</sup>Our code is available at [https://ntunlpsg.](https://ntunlpsg.github.io/project/globalwoz/) [github.io/project/globalwoz/](https://ntunlpsg.github.io/project/globalwoz/).
|
| 14 |
+
|
| 15 |
+
<span id="page-1-0"></span>
|
| 16 |
+
|
| 17 |
+
Figure 1: Examples of four use cases for multilingual ToD systems: A. Use Case E&E: An English speaker travels to a country of English. B. Use Case F&F: A foreign language speaker travels to a country of the foreign language. C. Use Case F&E: A foreign language speaker travels to a country of English. D. Use Case E&F: An English speaker travels to a country of a foreign language.
|
| 18 |
+
|
| 19 |
+
entity "British Museum" than its Chinese translation to search online or ask local people. To verify this code-switching phenomenon, we have also conducted a case study ([§6.1\)](#page-6-0) which shows that searching the information about translated entities online yields a much higher failure rate than searching them in their original languages. Motivated by these observations, we define *three unexplored use cases*[2](#page-1-1) of multilingual ToD where a foreign-language speaker uses ToD in the foreign-language country (F&F) or an English country (F&E), and an English speaker uses ToD in a foreign-language country (E&F). These use cases are different from the traditional E&E use case where an English speaker uses ToD in an English-speaking country.
|
| 20 |
+
|
| 21 |
+
To bridge the aforementioned gap between existing data curation methods and the real use cases, we propose a novel data curation method that *globalizes* an existing multi-domain ToD dataset beyond English for the three unexplored use cases. Specifically, building on top of MultiWoZ [\(Budzianowski](#page-8-2) [et al.,](#page-8-2) [2018\)](#page-8-2) — an English ToD dataset for dialogue state tracking (DST), we create GlobalWoZ, a new multilingual ToD dataset in three new target languages via machine translation and crawled ontologies in the target-language countries.
|
| 22 |
+
|
| 23 |
+
Our method only requires minor human efforts to post-edit a few hundred machine-translated dialogue templates in the target languages for evaluation. Besides, as cross-lingual transfer via pre-
|
| 24 |
+
|
| 25 |
+
trained multilingual models [\(Devlin et al.,](#page-8-3) [2019;](#page-8-3) [Conneau et al.,](#page-8-4) [2020;](#page-8-4) [Liu et al.,](#page-9-7) [2020;](#page-9-7) [Xue et al.,](#page-10-6) [2021\)](#page-10-6) has proven effective in many cross-lingual tasks, we further investigate another question: *How do these multilingual models trained on the English ToD dataset transfer knowledge to our globalized dataset?* To answer this question, we prepare a few baselines by evaluating popular ToD systems on our created test datasets in a *zero-shot* cross-lingual transfer setting as well as a *few-shot* setting.
|
| 26 |
+
|
| 27 |
+
Our contributions include the following:
|
| 28 |
+
|
| 29 |
+
- To the best of our knowledge, we provide the first step towards analyzing three unexplored use cases for multilingual ToD systems.
|
| 30 |
+
- We propose a cost-effective method that creates a new multilingual ToD dataset from an existing English dataset. Our dataset consists of highquality test sets which are first translated by machines and then post-edited by professional translators in three target languages (Chinese, Spanish and Indonesian). We also leverage machine translation to extend the language coverage of test data to another 17 target languages.
|
| 31 |
+
- Our experiments show that current multilingual systems and translate-train methods fail in zeroshot cross-lingual transfer on the dialogue state tracking task. To tackle this problem, we propose several data augmentation methods to train strong baseline models in both zero-shot and fewshot cross-lingual transfer settings.
|
| 32 |
+
|
| 33 |
+
<span id="page-1-1"></span><sup>2</sup> See comparisons of these use cases in Appendix [A](#page-11-0)
|
| 34 |
+
|
| 35 |
+
# Method
|
| 36 |
+
|
| 37 |
+
In order to globalize an existing English ToD dataset for the three aforementioned use cases, we propose an approach consisting of four steps as shown in Figure [2:](#page-3-0) (1) we first extract dialogue templates from the English ToD dataset by replacing English-specific entities with a set of generalpurpose placeholders ([§2.1\)](#page-2-0); (2) we then translate the templates to a target language for both training and test data, with one key distinction that we only post-edit the test data by professional translators to ensure the data quality for evaluation ([§2.2\)](#page-2-1); (3) next, we collect ontologies [\(Kiefer et al.,](#page-9-8) [2021\)](#page-9-8) containing the definitions of dialogue acts, local entities and their attributes in the target-language countries ([§2.3\)](#page-2-2); (4) finally, we tailor the translated templates by automatically substituting the placeholders with entities in the extracted ontologies to construct data for the three use cases ([§2.4\)](#page-3-1).
|
| 38 |
+
|
| 39 |
+
We start with MultiWoZ 2.2 [\(Zang et al.,](#page-10-7) [2020\)](#page-10-7) – a high-quality multi-domain English ToD dataset with more accurate human annotations compared to its predecessors MultiWoZ 2.0 [\(Budzianowski](#page-8-2) [et al.,](#page-8-2) [2018\)](#page-8-2) and MultiWoz 2.1 [\(Eric et al.,](#page-9-9) [2020\)](#page-9-9). For the sake of reducing human efforts for collecting ToD context in the target languages, we re-use the ToD context written by human in Multi-WoZ as the dialogue templates. Specifically as shown in Figure [2,](#page-3-0) we replace the English entities in MultiWoz by a set of general-purpose placeholders such as [attraction-name0] and [attraction-postcode1], where each placeholder contains the entity's domain, attribute and ID. To do so, we first build a dictionary with entity-placeholder pairs by parsing the annotations of all dialogues. For example, from a dialogue text —*"I recommend Whale of a time and the post code is cb238el."*, we obtain two entity-placeholder pairs from its human annotations, i.e., (*Whale of a time*, [attraction-name0]) and (*cb238el*, [attraction-postcode1]). Next, we identify entities in the dialogue by their word index from the human annotations, replace them with their placeholders in the dictionary, and finally obtain dialogue templates with placeholders. Notably, we skip the entities with their attributes of [choice] and [ref] that represent the number of choices and booking reference number, as these attributes could be used globally.
|
| 40 |
+
|
| 41 |
+
Following [Liu et al.](#page-9-10) [\(2021\)](#page-9-10) that translates sentences with placeholders, we use a machine translation system[3](#page-2-3) to translate dialogue templates with our designed placeholders. As we observe, a placeholder containing an entity domain, attribute and ID (e.g., attraction-name0) is useful to provide contextually meaningful information to the translation system, thus usually resulting in a highquality translation with the placeholder unchanged [4](#page-2-4) . This also enables us to easily locate the placeholders in the translation output and replace them with new entities in the target language.
|
| 42 |
+
|
| 43 |
+
To build a high-quality test set for evaluation, we further hire professional translators to post-edit a few hundred machine-translated templates, which produces natural and coherent sentences in the target languages.[5](#page-2-5) With the goal of selecting representative test templates for post-editing, we first calculate the frequency of all the 4-gram combinations in the MultiWoZ data, and then score each dialogue in the test set by the sum of the frequency of all the 4-gram combinations in the dialogue divided by the dialogue's word length. We use this scoring function to estimate the representativeness of a dialogue in the original dataset. Finally, we select the top 500 high-scoring dialogues in the test set for post-editing.[6](#page-2-6) We also use the same procedure to create a small high-quality training set for few-shot cross-lingual transfer setting.
|
| 44 |
+
|
| 45 |
+
Meanwhile, we crawl the attribute information of local entities in three cities from public websites (e.g., tripadvisor.com, booking.com) to create three ontologies for the three corresponding target languages respectively. As shown in Table [8](#page-12-0) in Appendix [E,](#page-12-1) we select Barcelona for Spanish (an Indo-European language), Shanghai for Mandarin (a Sino-Tibetan language) and Jakarta for Indonesian (an Austronesian language), which cover a set of typologically different language families.
|
| 46 |
+
|
| 47 |
+
Given a translated dialogue template, we can easily sample a random set of entities for a domain of interest from a crawled ontology and assign the entities to the template's placeholders to obtain a
|
| 48 |
+
|
| 49 |
+
<span id="page-2-3"></span><sup>3</sup>We use Google Translate ([https://cloud.google.](https://cloud.google.com/translate) [com/translate](https://cloud.google.com/translate)), an off-the-shelf MT system.
|
| 50 |
+
|
| 51 |
+
<span id="page-2-5"></span><span id="page-2-4"></span><sup>4</sup>Appendix [B](#page-11-1) has an example of label sequence translation.
|
| 52 |
+
|
| 53 |
+
<sup>5</sup>Appendix [C](#page-11-2) shows the bleu scores between MT test data and MTPE test data.
|
| 54 |
+
|
| 55 |
+
<span id="page-2-6"></span><sup>6</sup>Appendix [D](#page-12-2) shows the English test data distribution.
|
| 56 |
+
|
| 57 |
+
<span id="page-3-0"></span>
|
| 58 |
+
|
| 59 |
+
Figure 2: Illustration of our proposed pipeline: 1. Automatic Template Creation 2. Labeled Sequence Translation 3. Localized Ontologies Collection 4. Automatic Template Filling
|
| 60 |
+
|
| 61 |
+
new dialogue in the target language. Repeating this procedure on each dialogue template, we can easily build a high-quality labeled dataset in the target language. Table 9 in Appendix F shows the statistics of our collected entities in the target languages compared with the English data. The number of our collected entities is either larger than or equal to that in the English data except for the "train" domain; we collected the information about only 100 "trains" for each language due to the complexity in collecting relevant information.
|
| 62 |
+
|
| 63 |
+
After the above steps, we assign entities in a target language to the translated templates in the same target language for the F&F case, while assigning target-language entities to the English (source-language) templates for the F&E case. As for the E&F case, we keep the original English context by skipping the translation step and replace the placeholders with local entities in the target language (see Figure 2 for examples).
|
| 64 |
+
|
| 65 |
+
To sum up, our proposed method has three key properties: (1) our method is *cost-effective* as we only require a limited amount of post-editing efforts for a test set when compared to the expensive crowd-sourced efforts from the other studies; (2) we can easily sample entities from an ontology to create *large-scale machine-translated data* as a way of data augmentation for training; (3) our method is *flexible* to update entities in a ToD system whenever an update of ontology is available, e.g., extension of new entities. We refer the readers
|
| 66 |
+
|
| 67 |
+
to Table 10 for the data statistics of GlobalWoZ and Figure 9 for dialogue examples in the appendix.
|
| 68 |
+
|
| 69 |
+
Our experiments focus on the dialogue state tracking (DST), one of the fundamental components in a ToD system that predicts the goals of a user query in multi-turn conversations. We follow the setup in MultiWoZ (Budzianowski et al., 2018) to evaluate ToD systems for DST by the joint goal accuracy which measures the percentage of correctly predicting all goals in a multi-turn conversation.
|
2110.15072/main_diagram/main_diagram.drawio
ADDED
|
@@ -0,0 +1 @@
|
|
|
|
|
|
|
| 1 |
+
<mxfile host="app.diagrams.net" modified="2021-06-03T23:37:34.595Z" agent="5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36" etag="ti1BtuNq66Mow_JwFxCY" version="14.7.4"><diagram id="L0fRDlyy-iFwrJA7JFYw" name="Page-2">7Z1dc5s4FIZ/TS6b4cNg+zK2k+7O7HY6TWfbXsog20wxcrFS2/31K4EwIAmDHUC0UdqZIAEC9JxzJL0S5M6eb4/vY7Db/It8GN5Zhn+8sxd3lmXajkl+0ZxTmjO17DRjHQc+OyjPeA5+QZZpsNyXwIf70oEYoRAHu3Kmh6IIeriUB+IYHcqHrVBYvuoOrNkVjTzj2QMhFA77Evh4k+ZOrHGe/xcM1pvsyqY7TfdsQXYwK3i/AT46FLLsxzt7HiOE063tcQ5DWnlZvYD4w4cfX+ej9X/xl28LP5jOkPMuLezpmlPOjxDDCLdbtJUW/ROEL6y+2LPiU1aB5LF3dBODJc2abfA2JEmTbB42AYbPO+DR/QdiPyRvj0GMmRkYJE24YhBEMGbneCgMwW4fJIWlR2yC0P8HnNALzi6TpWYxeol86LNzzwRoYkUKZtcxXZJmTwJjDI8c+pp6M88wiRdAtIU4PpHzslIy/swBTJelD7k5WROWtymY0ojlAWbB63PROSWywUBdAc1uDo08Ow5A+Il4FojWTfiJfPwY7T6DeA0xy9ihIMIwfvxJqnXP8lZBGM5RiCjmCEX0Qhjt2M4QrrJzlwhjtGWJmFXWudCkppwZ+U/qbm7cO3cOeZo5SZt5mvynh8d4jqI9jolx0TIg2OMD3ONXWYIltwRlpEcCaePebRV2EnRBDhuRaluFiZNtAt+HURts23NVuxaQPe2RjyPhM9J8qkJpxqaAa2T1iMuV4HI0ripcE1PA1at3jSW4xhpXbUel6F59tlYTCS9TKa87y15NPOh5r4ZGOhroOyyUu5w4I8foEqekn9krzqlu3K7hZRkSXmaPvLIRdwmYrYFVAhspbt9MsYGrpDW00fftUC6Ptm1utH2u/QKkXodgptiqtepSb3i0ndr/xdFcv6hlLd6bDqCp8V8k1GuXxJI1cSTDDaklLuMSKPfHC5Vmk+p4t0/q44EcYLq7Y1Ip2X6ytc4NWWFPdbWyuuip+u7Sdbqxg2OZtzKzMHVDWidbyyCdhxj9ULpiskG3pNfp1hW2oI61OEfxxqU2q35uod+YKZtbUNvZGXYbWAVwKG2gnou4DthENTDZbMSbntyrAaZc37aukAKUdDOFMFjsaJb7oF11O3n9xlYOTRzU615nS73OgakDtkwdUD4B5Ttw4o/a79JMrKXtvjLgTgcGUI/jq6eVLgTUflef6WF8VwHVrh/G94taNorXAbXONwYUUIc3yFc9JTzsUbwtjuJ/mxawyk27cLTrtG/lgxBbHOvrNrOlNtMZWMiVrVpsl/bvFnLdgREauozTPMZCk0TZsSzGTt2xDbqMsWc+pwyX6hirhZ7OYuzAhJ6RTOh52zFWnZIze/j44/Onh/d//wqOT7t4+3m9sN6JgAQ6BQSlEOeD/SYJfrSGQBisSVUvPEhdi2TQygk8ED6wHVsCIyGawd4e1/RF1vv03VEr/U2LpXHKuHdpjzR5u9W07l16vQhhbyMBMmHpQlB9ejLID73Yd5ifJMTeWfKPXieIiRUFiN7onsT2TafxeJxNsGcrPkwxImezhEUjsLoyggZqnjaCro1gqtgIGrysqo2gYyOwx4qNQJShBCOQNcMFwwD7Xfp1gFVwpDZxqZNcYsK3sZUgE3bZJwIkfe1V8kPyiVX5AcxLYU1/VzhdbgLA7FGjksJsIFFpmFUwJ2WYVo/KlBRmA2FKw6wSi4cG
s8H7RBpmFUxOz7B71DOkMBuoVBpmBUyHnwBQDbOBNqVhVsF0OZg9rtyUj2sbqBuaZlUPaGhx1mwgU0B/DZ9ZEsV4g9YoAuFjnjuD4TIVB1Ec/KLqf0gzI/8hHXQuvBDs94FXtoLzADdLfASYDG6jJMcyZF+5usxbNvQsW4JFb+sY4K/UnsgwMU19Y/dAtxdHZmpJ4pQlIlLXhZNo8ltxX35aksrOS6uS1t8NRkQYoJfYgxfosZElzoT4i/3ci/qoIzG6LC+GIcDBz/IzyCyRXeEjVe1zm7e5jqI54Yw5fUx2Vm7PQkEjmyvI5gpK60EoKHGM82O/wlcs7Sv1vjIUm6+fGe/Q5l3eVKc32rxjcqOshjZPrAmcCoexmbTqG+ZfmGQNU9V9jY2Lx5ON9A7adcAGcqp2wKE2VkobIYdXK9xbGyG+P25045AO72CTyw7J3xd3fEcO2UDa1g6pxCFVOZrQMNzqaG5PjibccI2j8ffVj6M1mHb4wxxNTStW262sXwzUZSvWlnONxpxzmcNoxZyRCudqMA2kneu3bZF4m7J5Ma3xWIzrQ9q8kNGW0/A37NY4DX9fbh9OI4r6D4LXYHjEZYMvWylTWyVLCZsveJFpzbmebDTVk60RSz+BbRDSmpwTswjItS3jAzx0qAkLn9jPhvg1mnBn61Uk3xkTPw+m0TZB6w4NrSj3zzXaNt5MVo9WVKcXGm0bL3eoRyvqno8a7U1o+bko5WhFBe1Jo23j2zvq0YqazXuN9ia0g+tGiYqBDsjtrO92VKMVl5HqgNzOWifJG1n9ohUXleqA3M5CfuUBWatRXa0EV+21ku/aaTWqnTc2VHut5It3Wo1qZ8m/cq/ValRXLwBI3mluCS1J5n/QOZ0oyv8stv34Pw==</diagram></mxfile>
|
2110.15072/main_diagram/main_diagram.pdf
ADDED
|
Binary file (69.7 kB). View file
|
|
|
2110.15072/paper_text/intro_method.md
ADDED
|
@@ -0,0 +1,137 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
# Introduction
|
| 2 |
+
|
| 3 |
+
To this day, the majority of deep learning architectures consists of differentiable computation blocks and relies on gradient estimates for learning. At the same time, architectures with discrete intermediate components are a good fit for incorporating inductive biases [@cordonnier2021differentiable; @xu2015show] or dynamic control flow [@le2020revisiting; @graves2016adaptive]. One of the approaches to train such architectures is to replace the discrete component with a stochastic latent variable and optimize the expected objective.
|
| 4 |
+
|
| 5 |
+
In practice, the expectation has high computational cost, thus one typically resorts to stochastic estimates for the expectation and its gradient. Particularly, the two prevalent approaches to estimate the gradient of the objective are the score function estimator [@williams1992simple] and the reparameterization trick [@kingma2013auto; @rezende2014stochastic] for relaxed discrete variables [@maddison2016concrete; @jang2016categorical]. The former puts mild assumptions on the distribution and the objective, requiring the gradient of log-probability with respect to the distribution parameters to be differentiable, and provides unbiased estimates for the objective gradient. However, the naive estimate suffers from high variance and is less intuitive in implementation. In comparison, the reparameterized gradient estimates seamlessly integrate within the backpropagation algorithm and exhibit low variance out of the box. At the same time, the relaxation requires an architecture to be defined on the extended domain of the relaxed variable and introduces bias to the gradient estimate.
|
| 6 |
+
|
| 7 |
+
In the recent years, the attention of the community shifted towards models with structured latent variables. Informally, a structured variable models a distribution over structured objects such as graphs [@corro2019differentiable; @paulus2020gradient], sequences [@fu2020latent] or matchings [@mena2018learning]. Such latent variable may alter the computation graph or represent a generative process of data. Often, a structured variable is represented as a sequence of categorical random variables with a joint distribution incorporating the structure constraints (e.g., the fixed number of edges in an adjacency matrix of a tree). Recent works on structured latent variables address model training largely through the reparameterization trick using relaxed variables. In fact, the Gumbel-Softmax trick naturally translates to structured variables when $\operatorname{arg}\max$ operator is applied over a structured domain rather than component-wise [@paulus2020gradient]. In contrast, score function estimators are now less common in the structured domain, with a few exceptions such as [@yogatama2016learning; @havrylov2019cooperative]. The primary difficulty is the sample score function: neither Gibbs distributions, nor distributions defined through a generative process have a general shortcut to compute it.
|
| 8 |
+
|
| 9 |
+
In our work, we develop a framework to define structured variables along with a low-variance score function estimator. Our goal is to allow training models that do not admit relaxed variables and to improve optimization by alleviating the bias of the relaxed estimators. To achieve the goal we define the structured variable as an output of an algorithm with a perturbed input. Then, we outline a family of algorithms with a common property we call *stochastic invariant*. The property was inspired by the observation in [@paulus2020gradient Appendix, Sec. B], where the authors showed that the Kruskal's algorithm [@kruskal1956shortest] and the CLE algorithm [@edmonds1967optimum] are recursively applying the Gumbel-Max trick. We construct new algorithms with the same property and show how to use the property to learn structured latent variables. In the experimental section, we report performance on par with relaxation-based methods and apply the framework in a setup that does not allow relaxations.
|
| 10 |
+
|
| 11 |
+
# Method
|
| 12 |
+
|
| 13 |
+
The section below shows how to define a distribution over structured domain. Conceptually, we define a structured random variable as an output of an algorithm with a random input (e.g., to generate a random tree we return the minimum spanning tree of a graph with random weights). A common solution to incorporate such variable in a latent variable model is to replace the original algorithm with a differentiable approximation to allow gradient-based learning[@corro2019differentiable; @mena2018learning]. Such solution bypasses the difficulty of computing the sample probability. In contrast, we outline a family of algorithms for which we can get the probability of each intermediate computation step. To get the probabilities we restrict our attention to algorithms with specific recursive structure and random inputs with exponential distribution. In the next section, we leverage the probabilities for approximate inference in latent variable models without the differentiable approximations of the algorithm.
|
| 14 |
+
|
| 15 |
+
We illustrate our framework with a recursive algorithm generating a subset of a fixed size. The lemma below is a well-known result used to generate categorical random variables using a sequence of exponential random variables.
|
| 16 |
+
|
| 17 |
+
::: lemma
|
| 18 |
+
[]{#lemma:exp-min label="lemma:exp-min"} *(the Exponential-Min trick)* If $\exprv_i \sim \operatorname{Exp}{(\expparam_i)}, i \in \{1, \dots, \thetadim\}$ are independent, then for $\catrv := \operatorname{argmin}\limits_{i} \exprv_i$
|
| 19 |
+
|
| 20 |
+
1. the outcome probability is $\probnew{\catrv}{\catrv = \catreal; \expparam} \propto \expparam_\catreal$;
|
| 21 |
+
|
| 22 |
+
2. random variables $\exprv_i' := \exprv_i - \exprv_\catrv, i \in \{1, \dots, \thetadim\}$ are mutually independent given $\catrv$ with $\exprv_i' \mid \catrv \sim \operatorname{Exp}(\expparam_i)$ when $i \neq \catrv$ and $\exprv_i' = 0$ otherwise.[^6]
|
| 23 |
+
:::
|
| 24 |
+
|
| 25 |
+
The lemma is equivalent to *The Gumbel-Max trick*, defined for the variables $\gumbrv_i := - \log \exprv_i, i \in \{1,\dots,\thetadim\}$ and the maximum position $\operatorname{argmax}\limits_{i} \gumbrv_i$. In the above case, a variable $\gumbrv_i$ has a Gumbel distribution with the location parameter $\gumbparam_i = \log \expparam_i$, hence the name. Though the Gumbel-Max trick formulation is more common in the literature, we formulate the framework in terms of the exponential distribution and the equivalent Exponential-Min trick. Although the two statements are equivalent and we use their names interchangeably, some of the examples have a natural formulation in terms of the exponential distribution.
|
| 26 |
+
|
| 27 |
+
Importantly, the second claim in Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"} allows applying the Exponential-Min trick successively. We illustrate this idea with an algorithm for finding top-k elements. We present the recursive form of $\argtopk$ in Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"}. For each recursion level, the algorithm finds the minimum element, decrements $k$ and calls itself to find the subset excluding the minimum variable. For reasons explained below, the algorithm subtracts the minimum from the sequence $\exprv_{j}' = \exprv_j - \exprv_\tracerv$ before the recursion. This step does not change the output and may seem redundant.
|
| 28 |
+
|
| 29 |
+
Assuming the input of the algorithm is a vector $\exprv$ of independent exponential variables with rate parameters $\expparam$, the first argument in the recursive call $\exprv'$ is again a vector of independent exponential variables (given $\tracerv$) due to Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"}. In other words, the input distribution class is *invariant* throughout the recursion. Subtraction of the minimum is not necessary, but it allows to apply Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"} directly and simplifies the analysis of the algorithms. Besides that, for each recursion level variable $\tracerv$ has categorical distribution (conditioned on $\tracerv$ found in the above calls) with output probabilities proportional to $\expparam_k, k \in K$.
|
| 30 |
+
|
| 31 |
+
We use upper indices to denote the recursion depth and, with a slight abuse of notation, denote the concatenation of variables $\tracerv$ for each recursion depth as $\tracerv := (\tracerv^1, \dots, \tracerv^k)$. The output $\structrv$ is a set and does not take into account the order in $\tracerv$. Intuitively, $\tracerv$ acts as *the execution trace* of the algorithm, whereas $\structrv$ contains only partial information about $\tracerv$. The marginal probability of $\structreal$ is the sum $\probnew{\structrv}{\structrv = \structreal; \expparam} = \sum_{\tracereal \in \structrv^{-1}(\structreal)} \probnew{\tracerv}{\tracerv = \tracereal; \expparam}$ over all possible orderings of $\structreal = \{\structreal_1, \dots, \structreal_k\}$ denoted as $\structrv^{-1}(\structreal)$. The direct computation of such sum is prohibitive even for moderate $k$.
|
| 32 |
+
|
| 33 |
+
The $\argtopk$ illustration is a well-known extension of the Exponential-Min trick. In particular, the distribution of $\tracerv$ is known as the Plackett-Luce distribution [@plackett1975analysis] and coincides with $k$ categorical samples without replacement. Following the recursion, the observation probability factorizes according to the chain rule with $i$-th factor governed by equation $\probnew{\tracerv_i}{\tracerv_i = \tracereal_i \mid \tracereal_1, \dots, \tracereal_{i-1}; \expparam} = \tfrac{\expparam_{\tracereal_i}}{\sum_{j=1}^{\thetadim} \expparam_j - \sum_{j=1}^{i-1} \expparam_{\tracereal_j}}$. We discuss the multiple applications of the trick in Section [4](#sec:related-work){reference-type="ref" reference="sec:related-work"}. Next, we extend Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"} beyond subset selection.
|
| 34 |
+
|
| 35 |
+
<figure id="alg:general">
|
| 36 |
+
<div class="minipage">
|
| 37 |
+
<div class="algorithm">
|
| 38 |
+
<div class="algorithmic">
|
| 39 |
+
<p><span class="math inline">$\exprv, K, k$</span> <span class="math inline">$\structrv$</span> <span class="math inline">$\tracerv \Leftarrow \arg\min_{j \in K} \exprv_j$</span> <span class="math inline">$\exprv_j' \Leftarrow \exprv_j - \exprv_\tracerv$</span> <span class="math inline"><em>K</em><sup>′</sup>, <em>k</em><sup>′</sup> ⇐ <em>K</em> \ {<em>T</em>}, <em>k</em> − 1</span> <span class="math inline">$\exprv' \Leftarrow \{ \exprv_k' \mid k \in K'\}$</span> <span class="math inline">$\structrv' \Leftarrow F_{\text{top-k}}(E', K', k')$</span> <strong>return</strong> <span class="math inline">$\{\tracerv\} \cup \structrv'$</span></p>
|
| 40 |
+
</div>
|
| 41 |
+
</div>
|
| 42 |
+
</div>
|
| 43 |
+
<div class="minipage">
|
| 44 |
+
<div class="algorithm">
|
| 45 |
+
<div class="algorithmic">
|
| 46 |
+
<p><span class="math inline">$\exprv, K, R$</span> <span class="math inline">$\structrv$</span> <span class="math inline"><em>P</em><sub>1</sub>, …, <em>P</em><sub><em>m</em></sub> ⇐ <em>f</em><sub>split</sub>(<em>K</em>, <em>R</em>)</span> <span class="math inline">$\tracerv_i \Leftarrow \arg\min_{j \in P_i} \exprv_j$</span> <span class="math inline">$\exprv_j' \Leftarrow \exprv_j - \exprv_{\tracerv_i}$</span> <span class="math inline">$K', R' \Leftarrow f_{\text{map}}(K, R, \{\tracerv_i\}_{i=1}^m)$</span> <span class="math inline"><em>E</em><sup>′</sup> ⇐ {<em>E</em><sub><em>k</em></sub><sup>′</sup> ∣ <em>k</em> ∈ <em>K</em><sup>′</sup>}</span> <span class="math inline">$\structrv' \Leftarrow F_{\text{struct}}(E', K', R')$</span> <span class="math inline">$f_{\text{combine}}(\structrv', K, R, \{\tracerv_i\}_{i=1}^m)$</span></p>
|
| 47 |
+
</div>
|
| 48 |
+
</div>
|
| 49 |
+
</div>
|
| 50 |
+
<figcaption>The recursive algorithm for <span class="math inline">$\argtopk$</span> and the general algorithm with the stochastic invariant put side-by-side. Both algorithms perform the Exponential-Min trick and proceed with recursion using a subset of variables. The output <span class="math inline">$\structrv$</span> combines the current trace <span class="math inline">$\tracerv$</span> and the recursion output <span class="math inline">$\structrv'$</span>.</figcaption>
|
| 51 |
+
</figure>
|
| 52 |
+
|
| 53 |
+
In this section, we generalize Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"}. The idea is to preserve the property of Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"} that allows applying the Exponential-Min trick and abstract away the details to allow various instantiations of the algorithm. Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} is the generalization we put next to Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"} for comparison. It has a similar recursive structure and abstracts away the details using the auxiliary subroutines: $f_{\text{stop}}$ is the stop condition, $f_{\text{map}}$ and $f_{\text{combine}}$ handle the recursion and $f_{\text{split}}$ is an optional subroutine for the Exponential-Min trick. Additionally, we replace $k$ with an auxiliary argument $R$ used to accumulate information from the above recursion calls. Below, we motivate the subroutines and discuss the properties of an arbitrary instance of Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"}.
|
| 54 |
+
|
| 55 |
+
After checking the stop condition with $f_{\text{stop}}$, Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} applies the Exponential-Min trick simultaneously over $m$ disjoint sets rather than the whole index set $K$. For example, such an operation occurs when we find the columnwise minimum in the CLE algorithm [@edmonds1967optimum]. To allow the operation we construct *a partition* of indices $P_1, \dots, P_m$ and find the $\operatorname{arg}\min$ across the partition sets. To generate the partition, we introduce a new subroutine $f_{\text{split}}$ taking the index set $K$ and the auxiliary argument $R$ as inputs. The partition size $m$ may also be variable.
|
| 56 |
+
|
| 57 |
+
After the $m$ simultaneous Exponential-Min tricks, the generalized algorithm calls $f_{\text{map}}$ to select a subset of indices $K' \subsetneq K$ and to accumulate the necessary information for the next call in $R'$. Intuitively, the argument $R'$ represents *a reduction* to a smaller problem solved with a recursive call. In the $\argtopk$ example, $K'$ is $K \setminus \{\tracerv\}$ and $R'$ is the decrement $k - 1$. Note that Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} does not allow capturing such information with the other inputs $\exprv'$ and $K'$ exclusively.
|
| 58 |
+
|
| 59 |
+
Finally, the algorithm calls $f_{\text{combine}}$ to construct the structured variable $\structrv$ using the recursive call output $\structrv'$ and the other variables. In the top-k example, $f_{\text{combine}}$ appends the minimum variable index $\tracerv$ to the set $\structrv'$.
|
| 60 |
+
|
| 61 |
+
Now we argue that Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} preserves the invariant observed in Algorithm [\[alg:top-k\]](#alg:top-k){reference-type="ref" reference="alg:top-k"}. Again, we call the sequence of variables $\tracerv = (\tracerv_1, \dots, \tracerv_m)$ *the trace* of the algorithm. By design, if the input $\exprv$ is a sequence of independent exponential random variables, then the recursion input $\exprv'$ conditioned on $\tracerv$ is again a sequence of independent exponential distributions. For short, we call this property *the stochastic invariant*. The key to the stochastic invariant is the signature of the subroutines Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} uses. The algorithm only accesses $\exprv$ values through the Exponential-Min trick. As a result, the intermediate variables $K'$ and $R'$ as well as the output $\structrv$ depend on $\exprv$ only through $\tracerv$. In other words, the execution trace is a function of perturbation $\tracerv = \tracerv(\exprv)$ and the structured variable $\structrv = \structrv(\tracerv)$ is a function of the trace. Additionally, due to Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"}, the trace components $\tracerv_1, \dots, \tracerv_m$ have categorical distributions, whereas $\exprv'_k, k \in K$ are exponential random variables. We prove these properties by induction w.r.t. the recursion depth in Appendix [7](#sec:proofs){reference-type="ref" reference="sec:proofs"}.
|
| 62 |
+
|
| 63 |
+
Given the above, we derive two modifications of Algorithm [1](#alg:general){reference-type="ref" reference="alg:general"} generalizing Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"} and the Plackett-Luce distribution from the illustration. Algorithm [\[alg:log-prob\]](#alg:log-prob){reference-type="ref" reference="alg:log-prob"} computes the log-probability $\logprobnew{\tracerv}{\tracereal; \expparam}$ of a trace realization $\tracereal$. In Section [3](#sec:estimation){reference-type="ref" reference="sec:estimation"}, we use the algorithm output to construct gradient estimators. Again, the pseudo-code introduces index $j$ to denote the recursion depth and assumes the input $\tracereal = \{t^j_i\}_{i,j}$ is the concatenation of trace variables for all recursion depths $j=1,\dots,k$. Similarly, in Appendix [8](#sec:algorithms){reference-type="ref" reference="sec:algorithms"} we present an algorithm returning a sample from $\exprv \mid \tracerv = \tracereal$ given trace realization $\tracereal$.
|
| 64 |
+
|
| 65 |
+
This subsection contains an overview of algorithms with stochastic invariants along with the corresponding structured variables. We present the details and the pseudo-code in Appendix [11](#sec:pseudo-code){reference-type="ref" reference="sec:pseudo-code"}.
|
| 66 |
+
|
| 67 |
+
Analogous to the $\argtopk$ and the subset variable, the insertion sorting algorithm is an algorithm with the stochastic invariant. In the case of sorting, we do not omit the order of the trace variable $\tracerv$ and return the permutation $\structrv = \tracerv$. The resulting variable $\structrv$ has the Plackett-Luce distribution. We use the variable as a latent variable for insertion-based non-monotonic generation [@gu2019insertion]. As an alternative to the Plackett-Luce distribution, we consider a square parameter matrix and find a matching between rows and columns. We perturb the matrix and iteratively find the minimum element in the matrix. We exclude the row and the column containing the element and proceed to the next recursion step. Notably, in contrast to this algorithm, the Hungarian algorithm [@kuhn1955hungarian] for the minimum weight matching does not have the stochastic invariant.
|
| 68 |
+
|
| 69 |
+
As [@paulus2020gradient] observe, Kruskal's algorithm [@kruskal1956shortest] and Chu-Liu-Edmonds [@edmonds1967optimum] algorithm recursively apply the Exponential-Min trick, thus have the stochastic invariant. The former constructs the minimum spanning (MST) tree edge-by-edge. The corresponding trace variable $\tracerv$ is a sequence of edges, whereas $\structrv$ is an unordered set of edges. Interestingly, we could not represent Prim's algorithm [@prim1957shortest; @dasgupta2008algorithms] for the MST as an algorithm with the stochastic invariant. The Chu-Liu-Edmonds algorithm is an analog of Kruskal's algorithm for directed graphs. It returns the minimum tree $\structrv$ with a fixed root.
|
| 70 |
+
|
| 71 |
+
Additionally, we construct a latent binary tree variable specifically for the non-monotonic generation model [@welleck2019non]. In this model, each token in the sentence corresponds to a node in the tree. We assign weights to nodes and perturb the weights to obtain a sample. During the recursion, we set the minimum weight node to be the parent. We put the nodes on the left-hand side to the left subtree and the nodes on the right-hand side to the right subtree.
|
| 72 |
+
|
| 73 |
+
:::: algorithm
|
| 74 |
+
::: algorithmic
|
| 75 |
+
$\tracereal, \expparam, K, R$ $\logprobnew{\tracerv}{\tracereal; \expparam}$ $P_1, \dots, P_m \Leftarrow f_{\text{split}}(K, R)$ $\logprobnew{\tracerv}{\tracereal^1_i; \expparam} \Leftarrow \log \expparam_{\tracereal^1_i} - \log \left(\sum_{k \in P_i} \expparam_k \right)$ $\expparam'_{k} \Leftarrow \expparam_k$ $\expparam'_{\tracereal^1_i} \Leftarrow +\infty$ $K', R' \Leftarrow f_{\text{map}}(K, R, \{\tracereal^1_i\}_{i=1}^m)$ $\expparam' \Leftarrow \{ \expparam_k' \mid k \in K'\}$ $\logprobnew{\tracerv}{\tracereal^{>1} \mid \tracerv^{1} = \tracereal^{1}; \expparam} \Leftarrow F_{\text{log-prob}}(\tracereal^{>1}, \expparam', K', R')$ $\sum_{i=1}^m \logprobnew{\tracerv}{\tracereal^1_i; \expparam} + \logprobnew{\tracerv}{\tracereal^{>1} \mid \tracerv^1 = \tracereal^1 ; \expparam}$
|
| 76 |
+
:::
|
| 77 |
+
::::
|
| 78 |
+
|
| 79 |
+
In this section, we develop the gradient estimates for the structured distributions defined in Subsection [2.2](#sec:general){reference-type="ref" reference="sec:general"}. We start with a brief introduction of the two main approaches to gradient estimation for discrete categorical variables: the score function estimator [@williams1992simple] and the Gumbel-Softmax estimator [@maddison2016concrete; @jang2016categorical]. Then, we propose a low-variance modification of the score function estimator for the structured variables based on the intermediate representation of the variable. Finally, we conclude with a discussion of control variates we use together with the proposed estimator.
|
| 80 |
+
|
| 81 |
+
We consider gradient estimates of an expected objective $\nabla_\expparam \mathbb E_\catrv \loss{\catrv}$, where a discrete random variable $\catrv$ has parametric distribution $\probnew{\catrv}{\cdot; \expparam}$ with finite support. The basic *score function estimator* [@williams1992simple], also known as REINFORCE, defines an unbiased estimate for the gradient using a sample $\catreal$ as $\loss{\catreal} \nabla_\expparam \logprobnew{\catrv}{\catrv = \catreal; \expparam}$. The estimator does not make any assumptions about $\loss{\cdot}$, but requires an efficient sampling algorithm for ${\catrv}$ and the score function $\nabla_\expparam \logprobnew{\catrv}{\catrv = \catreal; \expparam}$. For a categorical random variable $\catrv$ with outcome probabilities $\probnew{\catrv}{\catrv = k; \expparam} \propto \expparam_k$ computation of $\nabla_{\expparam} \logprobnew{\catrv}{\catrv = \catreal; \expparam}$ is linear in the number of outcomes $\thetadim$. Therefore, the gradient estimation is fast when $\thetadim$ is small. However, for structured variables, such as graphs or sequences, the number of outcomes $\thetadim$ grows rapidly with the structure size. In this case, the estimator requires custom algorithms for sampling and estimating the score function.
|
| 82 |
+
|
| 83 |
+
*The Gumbel-softmax estimator*, introduced in [@jang2016categorical; @maddison2016concrete], is an alternative estimator that defines a continuous relaxation based on Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"}. On the forward pass, it replaces the categorical variable $\catrv$ with a differentiable surrogate $\tilde{\catreal} = \operatorname{soft}\max{(\tfrac{\gumbreal}{\temp})}$, where the input $\gumbreal := (-\log \expreal_1, \dots, -\log \expreal_\thetadim)$ is a component-wise transformation of exponential samples. Due to Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"}, the surrogate converges to the one-hot encoding of a categorical sample $\catreal$ as $\operatorname{soft}\max$ converges to $\operatorname{arg}\max$ when $\temp \rightarrow 0$. On the backward pass, the estimator uses the chain rule to construct the gradient $\nabla_\expparam \loss{\tilde{\catreal}}$ using the reparameterization trick [@kingma2013auto; @rezende2014stochastic] to define the partial derivative of a sample as $\tfrac{\partial \expreal_i}{\partial \expparam_i} = -\tfrac{\expreal_i}{\expparam_i}$. The Gumbel-Softmax estimator naturally extends to structured variables [@mena2018learning; @paulus2020gradient]. Specifically, the component-wise optimization in Lemma [\[lemma:exp-min\]](#lemma:exp-min){reference-type="ref" reference="lemma:exp-min"} can be replaced with a linear program over a structured set to generate structured variables and a relaxation can be used to define gradients. In the experimental section, we consider Stochastic Softmax Tricks (SST), introduced in [@paulus2020gradient], as the relaxation-based baseline for comparison.
|
| 84 |
+
|
| 85 |
+
As opposed to the score function estimator, the Gumbel-Softmax estimator requires a differentiable loss $\loss{\cdot}$. Such requirement imposes an additional restriction on a model architecture. The architecture must be defined for the relaxed samples as well as the hard samples, a non-trivial requirement for the models where discrete variables define branching [@le2020revisiting] or the inputs the model is not allowed to see [@kool2018attention; @gu2019insertion]. In practice, the vanilla score function estimator has notoriously higher variance compared to the Gumbel-Softmax estimator and requires a control variate to improve the gradient descent convergence.
|
| 86 |
+
|
| 87 |
+
In Subsection [2.2](#sec:general){reference-type="ref" reference="sec:general"}, we have introduced a probabilistic model involving an exponential variable $\exprv$ and the structured variable $\structrv$ defined as an output of an algorithm with input $\exprv$. Additionally, we have defined an intermediate trace variable $\tracerv = \tracerv(\exprv)$ such that $\structrv$ is a function $\structrv = \structrv(\tracerv)$. In this subsection, we apply $\tracerv$ to estimate gradients of $\mathbb E_\structrv \loss{\structrv}$.
|
| 88 |
+
|
| 89 |
+
In our setup, the score function $\nabla_\expparam \logprobnew{\exprv}{\exprv=\expreal;\expparam}$ is available out of the box. However, the score function estimator $$\begin{equation}
|
| 90 |
+
g_\exprv := \loss{\structrv(\expreal)} \nabla_\expparam \logprobnew{\exprv}{\exprv = \expreal; \expparam},
|
| 91 |
+
\end{equation}$$ which we refer to as $\exprv$-REINFORCE, is rarely used in practice. In fact, the variance of the score function estimator using $\exprv$ as an action space exceeds the variance of the estimator using $\structrv$. On the other hand, the score function for the structured variable $\nabla_\expparam \logprobnew{\structrv}{\structrv = \structreal; \expparam}$ involves marginalization w.r.t. $\exprv$ and may require significant computation resources to estimate.
|
| 92 |
+
|
| 93 |
+
To mitigate the variance of $g_\exprv$, we follow the observation of [@tucker2017rebar Appendix, Sec. B] and define another estimator as $$\begin{equation}
|
| 94 |
+
g_\tracerv := \loss{\structrv(\tracereal)} \nabla_\expparam \logprobnew{\tracerv}{\tracerv = \tracereal; \expparam},
|
| 95 |
+
\end{equation}$$ which is essentially the score function estimator that uses $\tracerv$ rather than $\exprv$. Below we refer to it as $\tracerv$-REINFORCE. Such estimate can be seen as the score function estimator $g_\exprv$ marginalized over $\exprv$ given $\tracerv$ (Appendix [7](#sec:proofs){reference-type="ref" reference="sec:proofs"} contains the detailed derivation) $$\begin{equation}
|
| 96 |
+
\loss{\structrv} \nabla_\expparam \logprobnew{\tracerv}{\tracerv; \expparam}
|
| 97 |
+
=
|
| 98 |
+
\mathbb E_{\exprv \mid \tracerv} \left[ \loss{\structrv} \nabla_\expparam \logprobnew{\exprv}{\exprv;\expparam} \mid \tracerv \right].
|
| 99 |
+
\end{equation}$$ As a result, the proposed gradient estimate $g_\tracerv$ is unbiased $$\begin{equation}
|
| 100 |
+
\mathbb E_{\exprv} \loss{\structrv} \nabla_\expparam \logprobnew{\exprv}{\exprv;\expparam} = \mathbb E_{\tracerv} \mathbb E_{\exprv \mid \tracerv} \left[ \loss{\structrv} \nabla_\expparam \logprobnew{\exprv}{\exprv;\expparam} \mid \tracerv \right],
|
| 101 |
+
\end{equation}$$ whereas the variance of the estimate does not exceed the variance of $g_\exprv$ $$\begin{equation}
|
| 102 |
+
\var{\exprv}{g_\exprv} = \var{\tracerv}{\mathbb E_{\exprv \mid \tracerv} g_\exprv} + \mathbb E_{\tracerv} \var{\exprv \mid \tracerv}{g_\exprv} = \var{\tracerv}{g_\tracerv} + \mathbb E_{\tracerv}\var{\exprv \mid \tracerv}{g_\exprv} \geq \var{\tracerv}{g_\tracerv}.
|
| 103 |
+
\end{equation}$$ In fact, in our experiments, we observed a significant difference in optimization due to the reduced variance of the estimator.
|
| 104 |
+
|
| 105 |
+
As we have argued in Subsection [2.2](#sec:general){reference-type="ref" reference="sec:general"}, we can compute the score function for the trace variable and apply the estimator $g_\tracerv$ in practice. Similarly, marginalization with respect to $\tracerv \mid \structrv$ leads to the score function estimator $g_\structrv := \loss{\structreal} \nabla_\expparam \logprobnew{\structrv}{\structrv = \structreal; \expparam}$ and reduces the variance even further: $\var{\tracerv}{g_\tracerv} \geq \var{\structrv}{g_\structrv}$. Therefore, the standard score function estimator is preferable when $\nabla_\expparam \logprobnew{\structrv}{\structrv = \structreal; \expparam}$ is available. In other cases, $g_\tracerv$ is a practical alternative.
|
| 106 |
+
|
| 107 |
+
In addition to the marginalization described above, we mitigate the variance of the score function estimator with control variates. We use two strategies to construct the control variates. The first strategy uses the algorithm for conditional reparameterization of $\exprv \mid \tracerv$ (Appendix [8](#sec:algorithms){reference-type="ref" reference="sec:algorithms"}, Algorithm [\[alg:conditional\]](#alg:conditional){reference-type="ref" reference="alg:conditional"}) and defines a family of sample-dependent control variates for the score function estimator [@tucker2017rebar; @grathwohl2017backpropagation]. The estimator generates a sample $\expreal$, runs the corresponding algorithm to obtain $\tracereal$ and $\structreal = \structrv(\tracereal)$, adds a control variate $c(\expreal)$ and uses an independent sample $\tilde{\expreal}$ from the conditional distribution $\exprv \mid \tracerv = \tracereal$ to eliminate the introduced bias $$\begin{equation}
|
| 108 |
+
\left(\loss{\structrv(\tracereal)} - c({\tilde{\expreal}})\right) \nabla_\expparam \logprobnew{\tracerv}{\tracerv=\tracereal; \expparam} - \nabla_\expparam c(\tilde{\expreal}) + \nabla_\expparam c(\expreal),
|
| 109 |
+
\end{equation}$$ In general, the above estimate extends to any pair of random variables $(B, Z)$ such that $B = B(Z)$ and the conditional distribution $Z \mid B$ admits the reparameterization trick. In [@tucker2017rebar], the control variate used the relaxed loss $\loss{\cdot}$, whereas [@grathwohl2017backpropagation] proposed to learn the control variate to improve the training dynamic. In our experiments, we use the estimator of [@grathwohl2017backpropagation] and refer to it as RELAX.
|
| 110 |
+
|
| 111 |
+
The second family of control variates we consider uses $K > 1$ samples $\tracereal_1,\dots, \tracereal_K$ to reduce the variance. Besides averaging the independent estimates, it uses the objective sample mean $\bar{\mathcal{L}} := \tfrac{\sum_{i=1}^K \loss{\structrv(\tracereal_i)}}{K}$ to reduce the variance even further: $$\begin{equation}
|
| 112 |
+
\label{eq:reinforce-plus}
|
| 113 |
+
\frac{1}{K - 1} \sum_{i=1}^{K} \left(\loss{\structrv(\tracereal_i)} - \overline{\mathcal{L}}\right) \nabla_{\expparam} \logprobnew{\tracerv}{\tracerv = \tracereal_i; \expparam}.
|
| 114 |
+
\end{equation}$$ Despite being quite simple, the above leave-one-out estimator [@kool2019buy] proved to be competitive with multiple recent works [@dongcoupled; @richter2020vargrad]. In our experiments, we refer to such estimator as $\tracerv$-REINFORCE+.[^7] To facilitate fair comparison, in a batch training setup we reduce the batch size proportionally to $K$.
|
| 115 |
+
|
| 116 |
+
Given a system of interacting particles, the dependencies of their states can be formally described as an undirected graph. We use Neural Relational Inference [@kipf2018neural], initially representing a relaxation-based approach, and examine its performance as a generative model and ability to reconstruct system interconnections, when applying score function techniques instead. We build an experiment in line with [@paulus2020gradient], generating data by translating a ground truth latent spanning tree (corresponding to the connections in the system) into a sequence of positions of points on a real plane, representing dynamics of the system over time. These points are obtained executing force-directed algorithm [@fruchterman1991graph] for $T = 10$ or $T = 20$ iterations and fed into the model.
|
| 117 |
+
|
| 118 |
+
The architecture of the model consists of a graph neural network (GNN) encoder, producing a distribution over spanning trees, and a GNN decoder, producing a distribution over time series of point positions. The model is trained in the manner of variational autoencoders (VAEs), optimizing ELBO, a lower bound on the joint log-likelihood of the observed data points at all timesteps.
|
| 119 |
+
|
| 120 |
+
We measure precision and recall with respect to the encoder samples and the ground truth dependency spanning tree. Table [\[nri1-table\]](#nri1-table){reference-type="ref" reference="nri1-table"} shows the results for T=10 iterations. Overall, score function methods performed better than their relaxation-based counterpart, achieving higher values of ELBO on the test set, but slightly worse performance in terms of structure recovery metrics. The results for T=20 and the detailed experimental setup are described in the Appendix [10.2](#sec:graphlayoutexp){reference-type="ref" reference="sec:graphlayoutexp"}.
|
| 121 |
+
|
| 122 |
+
We study the ability of the proposed score function estimators to recover the latent structure of the data in a setting, where it can be quite accurately described with an arborescence. Following details about data and models outlined by [@paulus2020gradient], we use a simplified version of the ListOps [@nangia2018listops] dataset. It consists of mathematical expressions (e.g. $\texttt{min[3 med[3 5 4] 2]}$), written in prefix form along with results of their evaluation, which are integers in $[0, 9]$. Given a prefix expression, one can algorithmically recover its structure as a parse tree. We bound the maximal length of expressions and the maximal depth of their parses along with removing the examples with the $\texttt{summod}$ operator. These limitations sufficiently decrease the amount of memory a model should have to calculate the result and facilitate the usage of GNNs which now become capable of evaluating expressions by a bounded number of message passing steps.
|
| 123 |
+
|
| 124 |
+
Our model consists of two parts: an encoder and a classifier. The encoder is a pair of LSTMs that generate parameters of the distribution over rooted arborescences on token nodes. The classifier is a GNN, which passes a fixed number of messages over the sampled arborescence and feeds the resulting embedding of the first token into the final MLP. Models are trained simultaneously to minimize cross-entropy. We examine the performance of the models by measuring classification accuracy along with precision and recall with respect to the edges of ground truth parse trees. Table [\[edmonds-table\]](#edmonds-table){reference-type="ref" reference="edmonds-table"} shows that score-function-based estimators, particularly $\tracerv$-REINFORCE+, show more stable performance in comparison to the relaxation-based estimator. A detailed description of the experiment can be found in the Appendix [10.3](#sec:edmondsexp){reference-type="ref" reference="sec:edmondsexp"}.
|
| 125 |
+
|
| 126 |
+
<figure id="toy-nonmonotonic" data-latex-placement="ht">
|
| 127 |
+
<div class="center">
|
| 128 |
+
<embed src="tables/nonmonotonic.pdf" />
|
| 129 |
+
</div>
|
| 130 |
+
<figcaption>Generative statistics of non-monotonic language model with different orders</figcaption>
|
| 131 |
+
</figure>
|
| 132 |
+
|
| 133 |
+
We apply our methods in a setting where the application of continuous relaxations is seemingly not possible. We construct a simple dataset by sampling a fixed number of balanced parentheses of various types (from 10 to 50 types) and model them with non-monotonic architecture, defined in [@welleck2019non]. Here, a language model generates sequences by consecutively adding new tokens in between previously generated ones. This process can be seen as modeling joint distribution over binary trees (treated as orders of generation) and sequences (which are generated along these orders).
|
| 134 |
+
|
| 135 |
+
We refer to this probabilistic model as the decoder, fix its architecture to be a single-layer LSTM and use teacher-forcing for training. More precisely, we compare two different modes of training decoder: fixed order and semi-supervised trainable order. In the first case, the order is obtained by applying a deterministic function to the input sequence. We study two fixed orders: *left-to-right*, corresponding to a degenerate tree with only right child nodes, and a more natural *smart* order, described in Appendix [10.4](#sec:nonmonexp){reference-type="ref" reference="sec:nonmonexp"}. In the semi-supervised (*ss*) case, 10% of sequences are paired with the smart order, and the overall model is trained as a VAE. We train the decoder by directly backpropagating the output signal and obtain the gradients for the encoder using RELAX estimator.
|
| 136 |
+
|
| 137 |
+
We choose models with the best validation perplexity and evaluate them by generating 200000 unique sequences and measuring the portion of those which are present in the train dataset (Old) and those which are balanced and not seen during training (New and Correct). Results of this toy experiment show that it is possible to improve generative metrics by considering non-trivial orders of generation. The experiment is described in detail in the Appendix [10.4](#sec:nonmonexp){reference-type="ref" reference="sec:nonmonexp"}.
|
2111.00295/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:ed08aa870b2c810e9b372ca9d13dc68bdaefff45350ff1e8bc764a515ec9dc5c
|
| 3 |
+
size 8137293
|
2111.00969/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:7962fb74609ea0497c68afd2fbf91db40fc7eaeaa28ea2fac3042ed448013abb
|
| 3 |
+
size 44505656
|
2111.09639/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:2e58dcd3049afd17ac6598053ab03c7665fd4a4645f827ed0141782ab668c75d
|
| 3 |
+
size 8402928
|
2111.15097/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:c6895bcd291107f36f01391b1d9c9249f1151af71ea0f47ea498ecbb77c78dca
|
| 3 |
+
size 3400852
|
2112.00712/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:deb5731b9e8b4e31885941ec6483c12db48210bb3df3f8d81e52f70e4967b79f
|
| 3 |
+
size 540936
|
2112.03258/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:0605bde3525f3c5b90e937a55f1e849badf35485c6f47c8e00074fc7fd4a73d7
|
| 3 |
+
size 6386216
|
2112.05883/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:fb4836521625c432a0f5083e58334939d8de87b9283e5461f7ad93434e0f66ef
|
| 3 |
+
size 1319147
|
2112.06598/paper.pdf
ADDED
|
@@ -0,0 +1,3 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
| 1 |
+
version https://git-lfs.github.com/spec/v1
|
| 2 |
+
oid sha256:60e34b5be09609b857dbca2caecf65e1f2008280b7a0065ef204bb4d3e5527fe
|
| 3 |
+
size 824901
|