{
"paper_id": "P16-1027",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T08:56:05.704460Z"
},
"title": "Using Sentence-Level LSTM Language Models for Script Inference",
"authors": [
{
"first": "Karl",
"middle": [],
"last": "Pichotta",
"suffix": "",
"affiliation": {},
"email": "pichotta@cs.utexas.edu"
},
{
"first": "Raymond",
"middle": [
"J"
],
"last": "Mooney",
"suffix": "",
"affiliation": {},
"email": "mooney@cs.utexas.edu"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "There is a small but growing body of research on statistical scripts, models of event sequences that allow probabilistic inference of implicit events from documents. These systems operate on structured verb-argument events produced by an NLP pipeline. We compare these systems with recent Recurrent Neural Net models that directly operate on raw tokens to predict sentences, finding the latter to be roughly comparable to the former in terms of predicting missing events in documents.",
"pdf_parse": {
"paper_id": "P16-1027",
"_pdf_hash": "",
"abstract": [
{
"text": "There is a small but growing body of research on statistical scripts, models of event sequences that allow probabilistic inference of implicit events from documents. These systems operate on structured verb-argument events produced by an NLP pipeline. We compare these systems with recent Recurrent Neural Net models that directly operate on raw tokens to predict sentences, finding the latter to be roughly comparable to the former in terms of predicting missing events in documents.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Statistical scripts are probabilistic models of event sequences (Chambers and Jurafsky, 2008) . A learned script model is capable of processing a document and inferring events that are probable but not explicitly stated. These models operate on automatically extracted structured events (for example, verbs with entity arguments), which are derived from standard NLP tools such as dependency parsers and coreference resolution engines.",
"cite_spans": [
{
"start": 64,
"end": 93,
"text": "(Chambers and Jurafsky, 2008)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Recent work has demonstrated that standard sequence models applied to such extracted event sequences, e.g. discriminative language models (Rudinger et al., 2015) and Long Short Term Memory (LSTM) recurrent neural nets (Pichotta and Mooney, 2016) , are able to infer held-out events more accurately than previous approaches. These results call into question the extent to which statistical event inference systems require linguistic preprocessing and syntactic structure. In an attempt to shed light on this issue, we compare existing script models to LSTMs trained as sentencelevel language models which try to predict the sequence of words in the next sentence from a learned representation of the previous sentences using no linguistic preprocessing.",
"cite_spans": [
{
"start": 138,
"end": 161,
"text": "(Rudinger et al., 2015)",
"ref_id": "BIBREF30"
},
{
"start": 218,
"end": 245,
"text": "(Pichotta and Mooney, 2016)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Some prior statistical script learning systems are focused on knowledge induction. These systems are primarily designed to induce collections of co-occurring event types involving the same entities, and their ability to infer held-out events is not their primary intended purpose (Chambers and Jurafsky, 2008; Ferraro and Van Durme, 2016, inter alia) . In the present work, we instead investigate the behavior of systems trained to directly optimize performance on the task of predicting subsequent events; in other words, we are investigating statistical models of events in discourse.",
"cite_spans": [
{
"start": 280,
"end": 309,
"text": "(Chambers and Jurafsky, 2008;",
"ref_id": "BIBREF3"
},
{
"start": 310,
"end": 350,
"text": "Ferraro and Van Durme, 2016, inter alia)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Much prior research on statistical script learning has also evaluated on inferring missing events from documents. However, the exact form that this task takes depends on the adopted definition of what constitutes an event: in previous work, events are defined in different ways, with differing degrees of structure. We consider simply using raw text, which requires no explicit syntactic annotation, as our mediating representation, and evaluate how raw text models compare to models of more structured events. Kiros et al. (2015) introduced skip-thought vector models, in which an RNN is trained to encode a sentence within a document into a lowdimensional vector that supports predicting the neighboring sentences in the document. Though the objective function used to train networks maximizes performance on the task of predicting sentences from their neighbors, Kiros et al. (2015) do not evaluate directly on the ability of networks to predict text; they instead demonstrate that the intermediate low-dimensional vector embeddings are useful for other tasks. We directly evaluate the text predictions produced by such sentence-level RNN encoder-decoder models, and measure their utility for the task of predicting subsequent events.",
"cite_spans": [
{
"start": 511,
"end": 530,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
},
{
"start": 866,
"end": 885,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "We find that, on the task of predicting the text of held-out sentences, the systems we train to operate on the level of raw text generally outperform the systems we train to predict text mediated by automatically extracted event structures. On the other hand, if we run an NLP pipeline on the automatically generated text and extract structured events from these predictions, we achieve prediction performance roughly comparable to that of systems trained to predict events directly. The difference between word-level and event-level models on the task of event prediction is marginal, indicating that the task of predicting the next event, particularly in an encoder-decoder setup, may not necessarily need to be mediated by explicit event structures. To our knowledge, this is the first effort to evaluate sentence-level RNN language models directly on the task of predicting document text. Our results show that such models are useful for predicting missing information in text; and the fact that they require no linguistic preprocessing makes them more applicable to languages where quality parsing and co-reference tools are not available.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Scripts, structured models of stereotypical sequences of events, date back to AI research from the 1970s, in particular the seminal work of Schank and Abelson (1977) . In this conception, scripts are modeled as temporally ordered sequences of symbolic structured events. These models are nonprobabilistic and brittle, and pose serious problems for automated learning.",
"cite_spans": [
{
"start": 140,
"end": 165,
"text": "Schank and Abelson (1977)",
"ref_id": "BIBREF31"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Statistical Script Learning",
"sec_num": "2.1"
},
{
"text": "In recent years, there has been a growing body of research into statistical script learning systems, which enable statistical inference of implicit events from text. Chambers and Jurafsky (2008; 2009) describe a number of simple event co-occurrence based systems which infer (verb, dependency) pairs related to a particular discourse entity. For example, given the text:",
"cite_spans": [
{
"start": 166,
"end": 194,
"text": "Chambers and Jurafsky (2008;",
"ref_id": "BIBREF3"
},
{
"start": 195,
"end": 200,
"text": "2009)",
"ref_id": "BIBREF4"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Statistical Script Learning",
"sec_num": "2.1"
},
{
"text": "Andrew Wiles won the 2016 Abel prize for proving Fermat's last theorem, such a system will ideally be able to infer novel facts like (accept, subject) or (publish, subject) for the entity Andrew Wiles, and facts like (accept, object) for the entity Abel prize. A number of other systems inferring the same types of pair events have been shown to provide superior performance in modeling events in documents (Jans et al., 2012; Rudinger et al., 2015) . Pichotta and Mooney (2014) give a cooccurrence based script system that models and infers more complex multi-argument events from text. For example, in the above example, their model would ideally be able to infer a single event like accept(Wiles, prize), as opposed to the two simpler pairs from which it is composed. They provide evidence that modeling and inferring more complex multi-argument events also yields superior performance on the task of inferring simpler (verb, dependency) pair events. These events are constructed using only coreference information; that is, the learned event co-occurrence models do not directly incorporate noun information.",
"cite_spans": [
{
"start": 407,
"end": 426,
"text": "(Jans et al., 2012;",
"ref_id": "BIBREF14"
},
{
"start": 427,
"end": 449,
"text": "Rudinger et al., 2015)",
"ref_id": "BIBREF30"
},
{
"start": 452,
"end": 478,
"text": "Pichotta and Mooney (2014)",
"ref_id": "BIBREF28"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Statistical Script Learning",
"sec_num": "2.1"
},
{
"text": "More recently, Pichotta and Mooney (2016) presented an LSTM-based script inference model which models and infers multi-argument events, improving on previous systems on the task of inferring verbs with arguments. This system can incorporate both noun and coreference information about event arguments. We will use this multiargument event formulation (formalized below) and compare LSTM models using this event formulation to LSTM models using raw text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Statistical Script Learning",
"sec_num": "2.1"
},
{
"text": "Recurrent Neural Networks (RNNs) are neural nets whose computation graphs have cycles. In particular, RNN sequence models are RNNs which map a sequence of inputs x 1 , . . . , x T to a sequence of outputs y 1 , . . . , y T via a learned latent vector whose value at timestep t is a function of its value at the previous timestep t \u2212 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "The most basic RNN sequence models, socalled \"vanilla RNNs\" (Elman, 1990) , are described by the following equations:",
"cite_spans": [
{
"start": 60,
"end": 73,
"text": "(Elman, 1990)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "z t = f (W i,z x t + W z,z z t\u22121 ) o t = g(W z,o z t )",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "where x t is the vector describing the input at time t; z t is the vector giving the hidden state at time t; o t is the vector giving the predicted output at time t; f and g are element-wise nonlinear functions (typically sigmoids, hyperbolic tangent, or rectified linear units); and W i,z , W z,z , and W z,o are learned matrices describing linear transformations. The recurrency in the computation graph arises from the fact that z t is a function of z t\u22121 .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
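As a concrete illustration of the update equations above, the following is a minimal NumPy sketch of one vanilla-RNN step. It assumes tanh and softmax for the nonlinearities f and g (the text leaves the exact choice open) and uses toy dimensions; it is not the authors' implementation.

```python
import numpy as np

def rnn_step(x_t, z_prev, W_iz, W_zz, W_zo):
    """One vanilla-RNN step: z_t = f(W_iz x_t + W_zz z_{t-1}), o_t = g(W_zo z_t)."""
    z_t = np.tanh(W_iz @ x_t + W_zz @ z_prev)        # hidden-state update (f = tanh)
    scores = W_zo @ z_t
    o_t = np.exp(scores - scores.max())
    o_t /= o_t.sum()                                  # predicted output distribution (g = softmax)
    return z_t, o_t

# toy sizes: 4-dim inputs, 3-dim hidden state, 5-way output
rng = np.random.default_rng(0)
W_iz, W_zz, W_zo = rng.normal(size=(3, 4)), rng.normal(size=(3, 3)), rng.normal(size=(5, 3))
z = np.zeros(3)
for x in rng.normal(size=(6, 4)):                    # a length-6 input sequence
    z, o = rnn_step(x, z, W_iz, W_zz, W_zo)
```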
{
"text": "The more complex Long Short-Term Memory (LSTM) RNNs (Hochreiter and Schmidhuber, Figure 1 : Long Short-Term Memory unit at timestep t. The four nonlinearity nodes (i t , g t , f t , and o t ) all have, as inputs, x t and z t\u22121 . Small circles with dots are elementwise vector multiplications.",
"cite_spans": [],
"ref_spans": [
{
"start": 81,
"end": 89,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "1997) have been shown to perform well on a wide variety of NLP tasks (Sutskever et al., 2014; Hermann et al., 2015; Vinyals et al., 2015, inter alia) . The LSTM we use is described by:",
"cite_spans": [
{
"start": 69,
"end": 93,
"text": "(Sutskever et al., 2014;",
"ref_id": "BIBREF34"
},
{
"start": 94,
"end": 115,
"text": "Hermann et al., 2015;",
"ref_id": "BIBREF12"
},
{
"start": 116,
"end": 149,
"text": "Vinyals et al., 2015, inter alia)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "i t = \u03c3 (W x,i x t + W z,i z t\u22121 + b i ) f t = \u03c3 (W x,f x t + W z,f z t\u22121 + b f ) o t = \u03c3 (W x,o x t + W h,i z t\u22121 + b o ) g t = tanh (W x,m x t + W z,m z t\u22121 + b g ) m t = f t \u2022 m t\u22121 + i t \u2022 g t z t = o t \u2022 tanh m t .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
{
"text": "The model is depicted graphically in Figure 1 . The memory vector m t is a function of both its previous value m t\u22121 and the input x t ; the vector z t is output both to any layers above the unit (which are trained to predict the output values y t ), and is additionally given as input to the LSTM unit at the next timestep t + 1. The W * , * matrices and b * vectors are learned model parameters, and u \u2022 v signifies element-wise multiplication.",
"cite_spans": [],
"ref_spans": [
{
"start": 37,
"end": 45,
"text": "Figure 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Recurrent Neural Networks",
"sec_num": "2.2"
},
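The gate equations above can be written out directly. The following NumPy sketch implements one LSTM step with toy dimensions; it is a minimal illustration of the equations, not the authors' TensorFlow implementation.

```python
import numpy as np

def sigmoid(v):
    return 1.0 / (1.0 + np.exp(-v))

def lstm_step(x_t, z_prev, m_prev, W, b):
    """One LSTM step following the gate equations above.
    W maps gate name -> (W_x*, W_z*) pair; b maps gate name -> bias vector."""
    i = sigmoid(W["i"][0] @ x_t + W["i"][1] @ z_prev + b["i"])   # input gate
    f = sigmoid(W["f"][0] @ x_t + W["f"][1] @ z_prev + b["f"])   # forget gate
    o = sigmoid(W["o"][0] @ x_t + W["o"][1] @ z_prev + b["o"])   # output gate
    g = np.tanh(W["g"][0] @ x_t + W["g"][1] @ z_prev + b["g"])   # candidate memory
    m = f * m_prev + i * g                                       # memory update
    z = o * np.tanh(m)                                           # hidden output
    return z, m

dim_x, dim_z = 4, 3
rng = np.random.default_rng(1)
W = {k: (rng.normal(size=(dim_z, dim_x)), rng.normal(size=(dim_z, dim_z))) for k in "ifog"}
b = {k: np.zeros(dim_z) for k in "ifog"}
z, m = np.zeros(dim_z), np.zeros(dim_z)
for x in rng.normal(size=(5, dim_x)):                            # a length-5 input sequence
    z, m = lstm_step(x, z, m, W, b)
```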
{
"text": "RNN sequence models have recently been shown to be extremely effective for word-level and character-level language models (Mikolov et al., 2011; Jozefowicz et al., 2016) . At each timestep, these models take a word or character as input, update a hidden state vector, and predict the next timestep's word or character. There is also a growing body of work on training RNN encoderdecoder models for NLP problems. These systems first encode the entire input into the network's hidden state vector and then, in a second step, decode the entire output from this vector (Sutskever et al., 2014; Vinyals et al., 2015; Serban et al., 2016) .",
"cite_spans": [
{
"start": 122,
"end": 144,
"text": "(Mikolov et al., 2011;",
"ref_id": "BIBREF23"
},
{
"start": 145,
"end": 169,
"text": "Jozefowicz et al., 2016)",
"ref_id": "BIBREF16"
},
{
"start": 565,
"end": 589,
"text": "(Sutskever et al., 2014;",
"ref_id": "BIBREF34"
},
{
"start": 590,
"end": 611,
"text": "Vinyals et al., 2015;",
"ref_id": "BIBREF35"
},
{
"start": 612,
"end": 632,
"text": "Serban et al., 2016)",
"ref_id": "BIBREF32"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence-Level RNN Language Models",
"sec_num": "2.3"
},
{
"text": "Sentence-level RNN language models, for example the skip-thought vector system of Kiros et al. (2015) , conceptually bridge these two approaches. Whereas standard language models are trained to predict the next token in the sequence of tokens, these systems are explicitly trained to predict the next sentence in the sequence of sentences. Kiros et al. (2015) train an encoder-decoder model to encode a sentence into a fixed-length vector and subsequently decode both the following and preceding sentence, using Gated Recurrent Units (Chung et al., 2014) . In the present work, we train an LSTM model to predict a sentence's successor, which is essentially the forward component of the skip-thought system. Kiros et al. (2015) use the skip-thought system as a means of projecting sentences into low-dimensional vector embeddings, demonstrating the utility of these embeddings on a number of other tasks; in contrast, we will use our trained sentence-level RNN language model directly on the task its objective function optimizes: predicting a sentence's successor.",
"cite_spans": [
{
"start": 82,
"end": 101,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
},
{
"start": 340,
"end": 359,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
},
{
"start": 534,
"end": 554,
"text": "(Chung et al., 2014)",
"ref_id": "BIBREF7"
},
{
"start": 707,
"end": 726,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Sentence-Level RNN Language Models",
"sec_num": "2.3"
},
{
"text": "The evaluation of inference-focused statistical script systems is not straightforward. Chambers and Jurafsky (2008) introduced the Narrative Cloze evaluation, in which a single event is held out from a document and systems are judged by the ability to infer this held-out event given the remaining events. This evaluation has been used by a number of published script systems (Chambers and Jurafsky, 2009; Jans et al., 2012; Pichotta and Mooney, 2014; Rudinger et al., 2015) . This automated evaluation measures systems' ability to model and predict events as they co-occur in text.",
"cite_spans": [
{
"start": 376,
"end": 405,
"text": "(Chambers and Jurafsky, 2009;",
"ref_id": "BIBREF4"
},
{
"start": 406,
"end": 424,
"text": "Jans et al., 2012;",
"ref_id": "BIBREF14"
},
{
"start": 425,
"end": 451,
"text": "Pichotta and Mooney, 2014;",
"ref_id": "BIBREF28"
},
{
"start": 452,
"end": 474,
"text": "Rudinger et al., 2015)",
"ref_id": "BIBREF30"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Narrative Cloze Evaluation",
"sec_num": "3.1"
},
{
"text": "The exact definition of the Narrative Cloze evaluation depends on the formulation of events used in a script system. For example, Chambers and Jurafsky 2008 2016evaluate inference of verbs with noun information about multiple arguments. In order to gather human judgments of inference quality, the latter also learn an encoder-decoder LSTM network for transforming verbs and noun arguments into English text to present to annotators for evaluation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Narrative Cloze Evaluation",
"sec_num": "3.1"
},
{
"text": "We evaluate instead on the task of directly inferring sequences of words. That is, instead of defining the Narrative Cloze to be the evaluation of predictions of held-out events, we define the task to be the evaluation of predictions of held-out text; in this setup, predictions need not be mediated by noisy, automatically-extracted events. To evaluate inferred text against gold standard text, we argue that the BLEU metric (Papineni et al., 2002) , commonly used to evaluate Statistical Machine Translation systems, is a natural evaluation metric. It is an n-gram-level analog to the eventlevel Narrative Cloze evaluation: whereas the Narrative Cloze evaluates a system on its ability to reconstruct events as they occur in documents, BLEU evaluates a system on how well it reconstructs the n-grams.",
"cite_spans": [
{
"start": 426,
"end": 449,
"text": "(Papineni et al., 2002)",
"ref_id": "BIBREF27"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Narrative Cloze Evaluation",
"sec_num": "3.1"
},
{
"text": "This evaluation takes some inspiration from the evaluation of neural encoder-decoder translation models (Sutskever et al., 2014; Bahdanau et al., 2015) , which use similar architectures for the task of Machine Translation. That is, the task we present can be thought of as \"translating\" a sentence into its successor. While we do not claim that BLEU is necessarily the optimal way of evaluating text-level inferences, but we do claim that it is a natural ngram-level analog to the Narrative Cloze task on events.",
"cite_spans": [
{
"start": 104,
"end": 128,
"text": "(Sutskever et al., 2014;",
"ref_id": "BIBREF34"
},
{
"start": 129,
"end": 151,
"text": "Bahdanau et al., 2015)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Narrative Cloze Evaluation",
"sec_num": "3.1"
},
{
"text": "If a model infers text, we may also evaluate it on the task of inferring events by automatically extracting structured events from its output text (in the same way as events are extracted from natural text). This allows us to compare directly to previous event-based models on the task they are optimized for, namely, predicting structured events.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Narrative Cloze Evaluation",
"sec_num": "3.1"
},
{
"text": "Statistical script systems take a sequence of events from a document and infer additional events that are statistically probable. Exactly what constitutes an event varies: it may be a (verb, dependency) pair inferred as relating to a particular discourse entity (Chambers and Jurafsky, 2008; Rudinger et al., 2015) , a simplex verb (Chambers and Jurafsky, 2009; Orr et al., 2014) , or a verb with multiple arguments (Pichotta and Mooney, 2014) . In the present work, we adopt a representation of events as verbs with multiple arguments (Balasubramanian et al., 2013; Pichotta and Mooney, 2014; Modi and Titov, 2014) . Formally, we define an event to be a variadic tuple (v, s, o, p * ), where v is a verb, s is a noun standing in subject relation to v, o is a noun standing as a direct object to v, and p * denotes an arbitrary number of (pobj, prep) pairs, with prep a preposition and pobj a noun related to the verb v via the preposition prep. 1 Any argument except v may be null, indicating no noun fills that slot. For example, the text Napoleon sent the letter to Josephine would be represented by the event (sent, Napoleon, letter, (Josephine, to)).",
"cite_spans": [
{
"start": 262,
"end": 291,
"text": "(Chambers and Jurafsky, 2008;",
"ref_id": "BIBREF3"
},
{
"start": 292,
"end": 314,
"text": "Rudinger et al., 2015)",
"ref_id": "BIBREF30"
},
{
"start": 332,
"end": 361,
"text": "(Chambers and Jurafsky, 2009;",
"ref_id": "BIBREF4"
},
{
"start": 362,
"end": 379,
"text": "Orr et al., 2014)",
"ref_id": "BIBREF26"
},
{
"start": 416,
"end": 443,
"text": "(Pichotta and Mooney, 2014)",
"ref_id": "BIBREF28"
},
{
"start": 536,
"end": 566,
"text": "(Balasubramanian et al., 2013;",
"ref_id": "BIBREF2"
},
{
"start": 567,
"end": 593,
"text": "Pichotta and Mooney, 2014;",
"ref_id": "BIBREF28"
},
{
"start": 594,
"end": 615,
"text": "Modi and Titov, 2014)",
"ref_id": "BIBREF25"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "We represent arguments by their grammatical head words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
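A small data structure makes the (v, s, o, p*) event formulation concrete. The class below is an illustrative sketch (the field names are ours, not the paper's), populated with the Napoleon example from the text.

```python
from typing import NamedTuple, Optional, Tuple

class Event(NamedTuple):
    """A verb with head-word arguments, following the (v, s, o, p*) formulation;
    any slot except the verb may be None (empty)."""
    verb: str
    subj: Optional[str] = None
    obj: Optional[str] = None
    preps: Tuple[Tuple[str, str], ...] = ()  # (pobj, prep) pairs

# "Napoleon sent the letter to Josephine" -> (sent, Napoleon, letter, (Josephine, to))
ev = Event(verb="sent", subj="Napoleon", obj="letter", preps=(("Josephine", "to"),))
```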
{
"text": "We evaluate on a number of different neural models which differ in their input and output. All models are LSTM-based encoder-decoder models. These models encode a sentence (either its events or text) into a learned hidden vector state and then, subsequently, decode that vector into its successor sentence (either its events or its text).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "Our general system architecture is as follows. At each timestep t, the input token is represented as a learned 100-dimensional embedding vector (learned jointly with the other parameters of the model), such that predictively similar words should get similar embeddings. This embedding is fed as input to the LSTM unit (that is, it will be the vector x t in Section 2.2, the input to the LSTM). The output of the LSTM unit (called z t in Section 2.2) is then fed to a softmax layer via a learned linear transformation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
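As an illustrative sketch of this per-timestep pipeline (written in PyTorch for brevity; the paper's models are implemented in TensorFlow), one timestep maps a token id to a 100-dimensional embedding, updates the LSTM state, and produces a softmax distribution over the vocabulary. The 500-dimensional hidden size is taken from Section 4.1; all other names are ours.

```python
import torch
import torch.nn as nn

vocab_size, emb_dim, hidden_dim = 50_000, 100, 500

embed = nn.Embedding(vocab_size, emb_dim)      # learned jointly with the rest of the model
lstm = nn.LSTMCell(emb_dim, hidden_dim)
to_vocab = nn.Linear(hidden_dim, vocab_size)   # linear transformation feeding the softmax

token = torch.tensor([42])                     # the input token id at timestep t
z, m = torch.zeros(1, hidden_dim), torch.zeros(1, hidden_dim)

x_t = embed(token)                             # x_t in Section 2.2
z, m = lstm(x_t, (z, m))                       # hidden (z_t) and memory (m_t) updates
probs = torch.softmax(to_vocab(z), dim=-1)     # distribution over the next token
```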
{
"text": "During the encoding phase the network is not trained to produce any output. During the decoding phase the output is a one-hot representation of the subsequent timestep's input token (that is, with a V -word vocabulary, the output will be a V -dimensional vector with one 1 and V \u2212 1 zeros). In this way, the network is trained to consume an entire input sequence and, as a second step, iteratively output the subsequent timestep's Figure 2: Encoder-Decoder setup predicting the text \"Goodbye\" from \"Hello\" input, which allows the prediction of full output sequences. This setup is pictured diagrammatically in Figure 2 , which gives an example of input and output sequence for a token-level encoderdecoder model, encoding the sentence \"Hello .\" and decoding the successor sentence \"Goodbye .\" Note that we add beginning-of-sequence and end-of-sequence pseudo-tokens to sentences. This formulation allows a system to be trained which can encode a sentence and then infer a successor sentence by iteratively outputting next-input predictions until the </S> end-of-sentence pseudotoken is predicted. We use different LSTMs for encoding and decoding, as the dynamics of the two stages need not be identical.",
"cite_spans": [],
"ref_spans": [
{
"start": 610,
"end": 618,
"text": "Figure 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
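Putting the pieces together, the sketch below (again illustrative PyTorch with hypothetical names, not the authors' code) uses separate encoder and decoder LSTMs, trains on next-token cross-entropy, and infers a successor sentence by feeding back predictions until the </S> pseudo-token is produced.

```python
import torch
import torch.nn as nn

class Seq2SeqLSTM(nn.Module):
    def __init__(self, vocab_size, emb_dim=100, hidden_dim=500):
        super().__init__()
        self.embed = nn.Embedding(vocab_size, emb_dim)
        self.encoder = nn.LSTM(emb_dim, hidden_dim, batch_first=True)  # separate LSTMs for
        self.decoder = nn.LSTM(emb_dim, hidden_dim, batch_first=True)  # encoding and decoding
        self.out = nn.Linear(hidden_dim, vocab_size)

    def forward(self, src_ids, tgt_ids):
        # Encode the whole input sentence; only the final (h, c) state is kept.
        _, state = self.encoder(self.embed(src_ids))
        # Decode: each target position is trained to predict the next target token.
        dec_out, _ = self.decoder(self.embed(tgt_ids[:, :-1]), state)
        return self.out(dec_out)

    @torch.no_grad()
    def greedy_decode(self, src_ids, bos_id, eos_id, max_len=30):
        """Iteratively emit next-token predictions until </S> (eos_id) appears."""
        _, state = self.encoder(self.embed(src_ids))
        tok, out = torch.tensor([[bos_id]]), []
        for _ in range(max_len):
            dec_out, state = self.decoder(self.embed(tok), state)
            tok = self.out(dec_out[:, -1]).argmax(dim=-1, keepdim=True)
            if tok.item() == eos_id:
                break
            out.append(tok.item())
        return out

# toy usage with a tiny vocabulary; ids 0 and 1 stand in for <S> and </S>
model = Seq2SeqLSTM(vocab_size=200)
src = torch.randint(2, 200, (8, 7))            # batch of 8 "sentences", 7 tokens each
tgt = torch.randint(2, 200, (8, 6))            # their successor sentences
loss = nn.functional.cross_entropy(model(src, tgt).reshape(-1, 200), tgt[:, 1:].reshape(-1))
loss.backward()
print(model.greedy_decode(src[:1], bos_id=0, eos_id=1))
```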
{
"text": "We notate the different systems as follows. Let s 1 be the input sentence and s 2 its successor sentence. Let t 1 denote the sequence of raw tokens in s 1 , and t 2 the tokens of s 2 . Further, let e 1 and e 2 be the sequence of structured events occurring in s 1 and s 2 , respectively (described in more detail in Section 4.1), and let e 2 [0] denote the first event of e 2 . The different systems we compare are named systematically as follows:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "\u2022 The system t 1 t 2 is trained to encode a sentence's tokens and decode its successor's tokens.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "\u2022 The system e 1 e 2 is trained to encode a sentence's events and decode its successor's events.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "\u2022 The system e 1 e 2 t 2 is trained to encode a sentence's events, decode its successor's events, and then encode the latter and subsequently decode the successor's text.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
{
"text": "We will not explicitly enumerate all systems, but other systems are defined analogously, with the schema X Y describing a system which is trained to encode X and subsequently decode Y , and X Y Z indicating a system which is trained to encode X, decode Y , and subsequently encode Y and decode Z. Note that in a system X Y Z, only X is provided as input. We also present results for systems of the form X a Y , which signifies that the system is trained to decode Y from X with the addition of an attention mechanism. We use the attention mechanism of Vinyals et al. (2015) . In short, these models have additional parameters which can learn soft alignments between positions of encoded inputs and positions in decoded outputs. Attention mechanisms have recently been shown to be quite empirically valuable in many complex sequence prediction tasks. For more details on the model, see Vinyals et al. (2015) . Figure 3 gives a diagrammatic representation of the different system setups. Text systems infer successor text and, optionally, parse that text and extract events from it; event sequences infer successor events and, optionally, expand inferred events into text. Note that the system t 1 t 2 , in which both the encoding and decoding steps operate on raw text, is essentially a one-directional version of the skip-thought system of Kiros et al. (2015) . 2 Further, the system e 1 e 2 t 2 , which is trained to take a sentence's event sequence as input, predict its successor's events, and then predict its successor's words, is comparable to the event inference system of Pichotta and Mooney (2016) ",
"cite_spans": [
{
"start": 552,
"end": 573,
"text": "Vinyals et al. (2015)",
"ref_id": "BIBREF35"
},
{
"start": 885,
"end": 906,
"text": "Vinyals et al. (2015)",
"ref_id": "BIBREF35"
},
{
"start": 1340,
"end": 1359,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
},
{
"start": 1580,
"end": 1606,
"text": "Pichotta and Mooney (2016)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [
{
"start": 909,
"end": 917,
"text": "Figure 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Models",
"sec_num": "3.2"
},
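The attention mechanism is only referenced here (Vinyals et al., 2015). Purely as an illustration of what learning soft alignments between encoder positions and decoder positions means, the following is an additive-attention sketch; the names and exact form are ours and not necessarily the variant used in the paper.

```python
import torch
import torch.nn as nn

class AdditiveAttention(nn.Module):
    """Scores each encoder position against the current decoder state and
    returns a weighted (soft-aligned) context vector plus the weights."""
    def __init__(self, hidden_dim=500):
        super().__init__()
        self.W_enc = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.W_dec = nn.Linear(hidden_dim, hidden_dim, bias=False)
        self.v = nn.Linear(hidden_dim, 1, bias=False)

    def forward(self, enc_states, dec_state):
        # enc_states: (batch, src_len, hidden); dec_state: (batch, hidden)
        scores = self.v(torch.tanh(self.W_enc(enc_states)
                                   + self.W_dec(dec_state).unsqueeze(1)))  # (batch, src_len, 1)
        weights = torch.softmax(scores.squeeze(-1), dim=-1)                # soft alignment
        context = (weights.unsqueeze(-1) * enc_states).sum(dim=1)          # (batch, hidden)
        return context, weights

attn = AdditiveAttention(hidden_dim=8)
ctx, w = attn(torch.randn(2, 5, 8), torch.randn(2, 8))  # context vectors and alignment weights
```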
{
"text": "Figure 3: Different system setups for modeling the two-sentence sequence \"The dog chased the cat.\" followed by \"The cat ran away.\" The gray components inside dotted boxes are only present in some systems.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Text representation Event representation",
"sec_num": null
},
{
"text": "for event inference, and optionally transform inferred events to text using another LSTM; we, on the other hand, use an encoder/decoder setup to infer text directly.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Text representation Event representation",
"sec_num": null
},
{
"text": "We train a number of LSTM encoder-decoder networks which vary in their input and output. Models are trained on English Language Wikipedia, with 1% of the documents held out as a validation set. Our test set consists of 10,000 unseen sentences (from articles in neither the training nor validation set). We train models with batch stochastic gradient descent with momentum, minimizing the cross-entropy error of output predictions. All models are implemented in TensorFlow (Abadi et al., 2015) . We use a vocabulary of the 50,000 most frequent tokens, replacing all other tokens with an out-of-vocabulary pseudo-token. Learned word embeddings are 100-dimensional, and the latent LSTM vector is 500-dimensional. To extract events from text, we use the Stanford Dependency Parser (De Marneffe et al., 2006; Socher et al., 2013) . We use the Moses toolkit (Koehn et al., 2007) to calculate BLEU. 3 We evaluate the task of predicting held-out text with three metrics. The first metric is BLEU, which is standard BLEU (the geometric mean of modified 1-, 2-, 3-, and 4-gram precision against a gold standard, multiplied by a brevity penalty which penalizes short candidates). The second metric we present, BLEU-BP, is BLEU without the brevity 3 Via the script multi-bleu.pl. penalty: in the task of predicting successor sentences, depending on predictions' end use, ontopic brevity is not necessarily undesirable. Evaluations are over top system inferences (that is, decoding is done by taking the argmax). Finally, we also present values for unigram precision (1G P), one of the components of BLEU.",
"cite_spans": [
{
"start": 472,
"end": 492,
"text": "(Abadi et al., 2015)",
"ref_id": "BIBREF0"
},
{
"start": 781,
"end": 803,
"text": "Marneffe et al., 2006;",
"ref_id": "BIBREF8"
},
{
"start": 804,
"end": 824,
"text": "Socher et al., 2013)",
"ref_id": "BIBREF33"
},
{
"start": 852,
"end": 872,
"text": "(Koehn et al., 2007)",
"ref_id": "BIBREF18"
},
{
"start": 892,
"end": 893,
"text": "3",
"ref_id": null
},
{
"start": 1236,
"end": 1237,
"text": "3",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation 4.1 Experimental Details",
"sec_num": "4"
},
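For illustration, the three text metrics can be computed at the sentence level as below. The paper uses the Moses multi-bleu.pl script, so this NLTK-based sketch is an assumption about an equivalent computation, not the authors' tooling; the example sentences are invented.

```python
from math import exp, log
from nltk.translate.bleu_score import sentence_bleu, modified_precision

hyp = "the company was renamed panasonic corporation in 2008 .".split()              # system prediction
ref = ["the company changed its name to panasonic corporation in 2008 .".split()]    # gold successor

bleu = sentence_bleu(ref, hyp)  # geometric mean of modified 1-4-gram precisions times brevity penalty

# BLEU-BP: the same geometric mean with the brevity penalty dropped
precisions = [float(modified_precision(ref, hyp, n)) for n in range(1, 5)]
bleu_bp = exp(sum(log(p) for p in precisions) / 4) if all(precisions) else 0.0

unigram_precision = precisions[0]  # the 1G P column
```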
{
"text": "We also evaluate on the task of predicting heldout verb-argument events, either directly or via inferred text. We use two evaluation metrics for this task. First, the Accuracy metric measures the percentage of a system's most confident guesses that are totally correct. That is, for each held-out event, a system makes its single most confident guess for that event, and we calculate the total percentage of such guesses which are totally correct. Some authors (e.g. Jans et al. (2012) , Pichotta and Mooney (2016) ) present results on the \"Recall at k\" metric, judging gold-standard recall against a list of top k event inferences; this metric is equivalent to \"Recall at 1.\" This is quite a stringent metric, as an inference is only counted correct if the verb and all arguments are correct. To relax this requirement, we also present results on what we call the Partial Credit metric, which is the percentage of held-out event components identical to the respective components in a system's top inference. 4 Table 1 gives the results of evaluating predicted successor sentence text against the gold standard using BLEU. ply reproduces the input sentence as its own successor. 5 Below this are systems which make predictions from event information, with systems which make predictions from raw text underneath. Transformations written X a Y are, recall, encoder-decoder LSTMs with attention. Note, first, that the text-level models outperform other models on BLEU. In particular, the two-step model e 1 e 2 t 2 (and comparable model with attention) which first predicts successor events and then, as a separate step, expands these events into text, performs quite poorly. This is perhaps due to the fact that the translation from text to events is lossy, so reconstructing raw sentence tokens is not straightforward.",
"cite_spans": [
{
"start": 467,
"end": 485,
"text": "Jans et al. (2012)",
"ref_id": "BIBREF14"
},
{
"start": 488,
"end": 514,
"text": "Pichotta and Mooney (2016)",
"ref_id": "BIBREF29"
},
{
"start": 1179,
"end": 1180,
"text": "5",
"ref_id": null
}
],
"ref_spans": [
{
"start": 1011,
"end": 1018,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Evaluation 4.1 Experimental Details",
"sec_num": "4"
},
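A short sketch of how we read the two event metrics described above (our interpretation, with hypothetical helper names): Accuracy counts exact matches of the single top guess, and Partial Credit counts matching components.

```python
def accuracy_and_partial_credit(gold_events, top_guesses):
    """gold_events / top_guesses: parallel lists of (verb, subj, obj, preps) tuples.
    Accuracy: fraction of top guesses identical to the gold event.
    Partial Credit: fraction of individual components matching the gold component."""
    exact = component_hits = component_total = 0
    for gold, guess in zip(gold_events, top_guesses):
        exact += int(gold == guess)
        for g, p in zip(gold, guess):
            component_hits += int(g == p)
            component_total += 1
    return exact / len(gold_events), component_hits / component_total

gold = [("sent", "napoleon", "letter", (("josephine", "to"),))]
pred = [("sent", "napoleon", None, (("josephine", "to"),))]
print(accuracy_and_partial_credit(gold, pred))  # (0.0, 0.75)
```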
{
"text": "The BLEU-BP scores, which are BLEU without the brevity penalty, are noticeably higher in the text-level models than the raw BLEU scores. This is in part because these models seem to produce shorter sentences, as illustrated below in section 4.4.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Evaluation",
"sec_num": "4.2"
},
{
"text": "The attention mechanism does not obviously benefit either text or event level prediction encoder-decoder models. This could be because there is not an obvious alignment structure between contiguous spans of raw text (or events) in natural documents.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Evaluation",
"sec_num": "4.2"
},
{
"text": "These results provide evidence that, if the Narrative Cloze task is defined to evaluate prediction of held-out text from a document, then sentencelevel RNN language models provide superior performance to RNN models operating at the event level. In other words, linguistic pre-processing does not obviously benefit encoder-decoder models trained to predict succeeding text. Table 2 gives results on the task of predicting the next verb with its nominal arguments; that is, whereas Table 1 gave results on a text analog to the Narrative Cloze evaluation (BLEU), Table 2 gives results on the verb-with-arguments prediction version. In the t 1 t 2 e 2 [0] system (and the comparable system with attention), events are extracted from automatically generated text by parsing output text and applying the same event extractor to this parse used to extract events from raw text. 6 The row labeled Most common in Table 2 gives performance for the baseline system which always guesses the most common event in the training set.",
"cite_spans": [
{
"start": 871,
"end": 872,
"text": "6",
"ref_id": null
}
],
"ref_spans": [
{
"start": 373,
"end": 380,
"text": "Table 2",
"ref_id": null
},
{
"start": 480,
"end": 487,
"text": "Table 1",
"ref_id": null
},
{
"start": 560,
"end": 567,
"text": "Table 2",
"ref_id": null
},
{
"start": 904,
"end": 911,
"text": "Table 2",
"ref_id": null
}
],
"eq_spans": [],
"section": "Experimental Evaluation",
"sec_num": "4.2"
},
{
"text": "The LSTM models trained to directly predict events are roughly comparable to systems which operate on raw text, performing slightly worse on accuracy and slightly better when taking partial credit into account. As with the previous comparisons with BLEU, the attention mechanism does not provide an obvious improvement when decoding inferences, perhaps, again, because the event inference problem lacks a clear alignment structure.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Evaluation",
"sec_num": "4.2"
},
{
"text": "These systems infer their most probable guesses of e 2 [0], the first event in the succeeding sentence. In order for a system prediction to be counted as correct, it must have the correct strings for grammatical head words of all components of the correct event. Note also that we judge only against a system's single most confident prediction (as opposed to some prior work (Jans et al., 2012; Pichotta and Mooney, 2014) which takes the top k predictions-the numbers presented here are therefore noticeably lower). We do this mainly for computational reasons: namely, a beam search over a full sentence's text would be quite computationally expensive.",
"cite_spans": [
{
"start": 375,
"end": 394,
"text": "(Jans et al., 2012;",
"ref_id": "BIBREF14"
},
{
"start": 395,
"end": 421,
"text": "Pichotta and Mooney, 2014)",
"ref_id": "BIBREF28"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Experimental Evaluation",
"sec_num": "4.2"
},
{
"text": "The results given above are for systems which encode information about one sentence and decode information about its successor. This is within the spirit of the skip-gram system of Kiros et al. (2015) , but we may wish to condition on more of the document. To investigate this, we perform an experiment varying the number of previous sentences input during the encoding step of t 1 t 2 text-level models without attention. We train three different models, which take either one, three, or five sentences as input, respectively, and are trained to output the successor sentence. Table 3 : Varying the amount of context in textlevel models. \"Num Prev Sents\" is the number of previous sentences supplied during encoding. Table 3 gives the results of running these models on 10,000 sentences from the validation set. As can be seen, in the training setup we investigate, more additional context sentences have a mixed effect, depending on the metric. This is perhaps due in part to the fact that we kept hyperparameters fixed between experiments, and a different hyperparameter regime would benefit predictions from longer input sequences. More investigation could prove fruitful. Figure 4 gives some example automatic nextsentence text predictions, along with the input sentence and the gold-standard next sentence. Note that gold-standard successor sentences frequently introduce new details not obviously inferrable from previous text. Top system predictions, on the other hand, are frequently fairly short. This is likely due part to the fact that the cross-entropy loss does not directly penalize short sentences and part to the fact that many details in gold-standard successor text are inherently difficult to predict.",
"cite_spans": [
{
"start": 181,
"end": 200,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [
{
"start": 578,
"end": 585,
"text": "Table 3",
"ref_id": null
},
{
"start": 718,
"end": 725,
"text": "Table 3",
"ref_id": null
},
{
"start": 1177,
"end": 1185,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Adding Additional Context",
"sec_num": "4.3"
},
{
"text": "The general low magnitude of the BLEU scores presented in Table 1 , especially in comparison to the scores typically reported in Machine Translation results, indicates the difficulty of the task. In open-domain text, a sentence is typically not straightforwardly predictable from preceding text; if it were, it would likely not be stated.",
"cite_spans": [],
"ref_spans": [
{
"start": 58,
"end": 65,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.5"
},
{
"text": "On the task of verb-argument prediction in Table 2, the difference between t 1 t 2 and e 1 e 2 [0] is fairly marginal. This raises the general question of how much explicit syntactic analysis is required for the task of event inference, particularly in the encoder/decoder setup. These results provide evidence that a sentence-level RNN language model which operates on raw tokens can predict what comes next in a document as well or nearly as well as an event-mediated script model.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.5"
},
{
"text": "There are a number of further extensions to this work. First, in this work (and, more generally, Neural Machine Translation research), though generated text is evaluated using BLEU, systems are optimized for per-token cross-entropy error, which is a different objective (Luong et al. (2016) give an example of a system which improves cross-entropy error but reduces BLEU score in the Neural Machine Translation context). Finding differentiable objective functions that more directly target more complex evaluation metrics like BLEU is an interesting future research direction.",
"cite_spans": [
{
"start": 270,
"end": 290,
"text": "(Luong et al. (2016)",
"ref_id": "BIBREF21"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "5"
},
{
"text": "Relatedly, though we argue that BLEU is a natural token-sequence-level analog to the verbargument formulation of the Narrative Cloze task, it is not obviously the best metric for evaluating inferences of text, and comparing these automated metrics with human judgments is an important direction of future work. Pichotta and Mooney (2016) present results on crowdsourced human evaluation of script inferences that could be repeated for our RNN models.",
"cite_spans": [
{
"start": 311,
"end": 337,
"text": "Pichotta and Mooney (2016)",
"ref_id": "BIBREF29"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "5"
},
{
"text": "Though we focus here on forward-direction models predicting successor sentences, bidirectional encoder-decoder models, which predict sentences from both previous and subsequent text, are another interesting future research direction.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Future Work",
"sec_num": "5"
},
{
"text": "The use of scripts in AI dates back to the 1970s (Minsky, 1974; Schank and Abelson, 1977) ; in this conception, scripts were composed of complex events with no probabilistic semantics, which were difficult to learn automatically. In recent years, a growing body of research has investigated learning probabilistic co-occurrence models with simpler events. Chambers and Jurafsky (2008) propose a model of co-occurrence of (verb, dependency) pairs, which can be used to infer such Input:",
"cite_spans": [
{
"start": 49,
"end": 63,
"text": "(Minsky, 1974;",
"ref_id": "BIBREF24"
},
{
"start": 64,
"end": 89,
"text": "Schank and Abelson, 1977)",
"ref_id": "BIBREF31"
},
{
"start": 356,
"end": 384,
"text": "Chambers and Jurafsky (2008)",
"ref_id": "BIBREF3"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
{
"text": "As of October 1 , 2008 , OOV changed its company name to Panasonic Corporation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related Work",
"sec_num": "6"
},
{
"text": "OOV products that were branded \"National\" in Japan are currently marketed under the \"Panasonic\" brand. Predicted: The company's name is now OOV .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Gold:",
"sec_num": null
},
{
"text": "White died two days after Curly Bill shot him.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "Before dying, White testified that he thought the pistol had accidentally discharged and that he did not believe that Curly Bill shot him on purpose. Predicted: He was buried at OOV Cemetery.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Gold:",
"sec_num": null
},
{
"text": "The foundation stone was laid in 1867.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "The members of the predominantly Irish working class parish managed to save \u00a3700 towards construction, a large sum at the time. Predicted: The OOV was founded in the early 20th century.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Gold:",
"sec_num": null
},
{
"text": "Soldiers arrive to tell him that OOV has been seen in camp and they call for his capture and death. Gold:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "OOV agrees . Predicted:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "OOV is killed by the OOV . Figure 4 : Sample next-sentence text predictions. OOV is the out-of-vocabulary pseudo-token, which frequently replaces proper names.",
"cite_spans": [],
"ref_spans": [
{
"start": 27,
"end": 35,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "pairs from documents; Jans et al. 2012give a superior model in the same general framework. Chambers and Jurafsky (2009) give a method of generalizing from single sequences of pair events to collections of such sequences. Rudinger et al. (2015) apply a discriminative language model to the (verb, dependency) sequence modeling task, raising the question of to what extent event inference can be performed with standard language models applied to event sequences. Pichotta and Mooney (2014) describe a method of learning a co-occurrence based model of verbs with multiple coreference-based entity arguments.",
"cite_spans": [
{
"start": 91,
"end": 119,
"text": "Chambers and Jurafsky (2009)",
"ref_id": "BIBREF4"
},
{
"start": 221,
"end": 243,
"text": "Rudinger et al. (2015)",
"ref_id": "BIBREF30"
},
{
"start": 462,
"end": 488,
"text": "Pichotta and Mooney (2014)",
"ref_id": "BIBREF28"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "There is a body of related work focused on learning models of co-occurring events to automatically induce templates of complex events comprising multiple verbs and arguments, aimed ultimately at maximizing coherency of templates (Chambers, 2013; Cheung et al., 2013; Balasubramanian et al., 2013) . Ferraro and Van Durme (2016) give a model integrating various levels of event information of increasing abstraction, evaluating both on coherence of induced templates and log-likelihood of predictions of held-out events.",
"cite_spans": [
{
"start": 229,
"end": 245,
"text": "(Chambers, 2013;",
"ref_id": "BIBREF5"
},
{
"start": 246,
"end": 266,
"text": "Cheung et al., 2013;",
"ref_id": "BIBREF6"
},
{
"start": 267,
"end": 296,
"text": "Balasubramanian et al., 2013)",
"ref_id": "BIBREF2"
},
{
"start": 299,
"end": 327,
"text": "Ferraro and Van Durme (2016)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "McIntyre and Lapata (2010) describe a system that learns a model of co-occurring events and uses this model to automatically generate stories via a Genetic Algorithm.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "There have been a number of recent published neural models for various event-and discourserelated tasks. Pichotta and Mooney (2016) show that an LSTM event sequence model outperforms previous co-occurrence methods for predicting verbs with arguments. Granroth-Wilding and Clark (2016) describe a feedforward neu-ral network which composes verbs and arguments into low-dimensional vectors, evaluating on a multiple-choice version of the Narrative Cloze task. Modi and Titov (2014) describe a feedforward network which is trained to predict event orderings. Kiros et al. (2015) give a method of embedding sentences in low-dimensional space such that embeddings are predictive of neighboring sentences. Li et al. (2014) and Ji and Eisenstein (2015) , use RNNs for discourse parsing; Liu et al. (2016) use a Convolutional Neural Network for implicit discourse relation classification.",
"cite_spans": [
{
"start": 105,
"end": 131,
"text": "Pichotta and Mooney (2016)",
"ref_id": "BIBREF29"
},
{
"start": 458,
"end": 479,
"text": "Modi and Titov (2014)",
"ref_id": "BIBREF25"
},
{
"start": 556,
"end": 575,
"text": "Kiros et al. (2015)",
"ref_id": "BIBREF17"
},
{
"start": 700,
"end": 716,
"text": "Li et al. (2014)",
"ref_id": "BIBREF19"
},
{
"start": 721,
"end": 745,
"text": "Ji and Eisenstein (2015)",
"ref_id": "BIBREF15"
},
{
"start": 780,
"end": 797,
"text": "Liu et al. (2016)",
"ref_id": "BIBREF20"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Input:",
"sec_num": null
},
{
"text": "We have given what we believe to be the first systematic evaluation of sentence-level RNN language models on the task of predicting held-out document text. We have found that models operating on raw text perform roughly comparably to identical models operating on predicate-argument event structures when predicting the latter, and that text models provide superior predictions of raw text. This provides evidence that, for the task of held-out event prediction, encoder/decoder models mediated by automatically extracted events may not be learning appreciably more structure than systems trained on raw tokens alone.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "7"
},
{
"text": "This is essentially the event representation ofPichotta and Mooney (2016), but whereas they limited events to having a single prepositional phrase, we allow an arbitrary number, and we do not lemmatize words.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "The system ofKiros et al. (2015), in addition to being trained to predict the next sentence, also contains a backwarddirectional RNN trained to predict a sentence's predecessor; we condition only on previous text.Kiros et al. (2015) also use Gated Recurrent Units instead of LSTM.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This metric was used inPichotta and Mooney (2014), but there it was called Accuracy. In the present work, we use \"accuracy\" only to mean Recall at 1.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "\"t1 t1\" is minor abuse of notation, as the system is not an encoder/decoder but a simple identity function.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "This is also a minor abuse of notation, as the second transformation uses a statistical parser rather than an encoder/decoder.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [
{
"text": "Thanks to Stephen Roller, Amelia Harrison, and the UT NLP group for their help and feedback. Thanks also to the anonymous reviewers for their very helpful suggestions. This research was supported in part by the DARPA DEFT program under AFRL grant FA8750-13-2-0026.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Acknowledgments",
"sec_num": null
},
{
"text": "Our Wikipedia dump from which the training, development, and test sets are constructed is from Jan 2, 2014. We parse text using version 3.3.1 of the Stanford CoreNLP system. We use a vocab consisting of the 50,000 most common tokens, replacing all others with an Out-of-vocabulary pseudotoken. We train using batch stochastic gradient descent with momentum with a batch size of 10 sequences, using an initial learning rate of 0.1, damping the learning rate by 0.99 any time the previous hundred updates' average test error is greater than any of the average losses in the previous ten groups of hundred updates. Our momentum parameter is 0.95. Our embedding vectors are 100-dimensional, and our LSTM hidden state is 500-dimensional. We train all models for 300k batch updates (with the exception of the models compared in \u00a74.3, all of which we train for 150k batch updates, as training is appreciably slower with longer input sequences). Training takes approximately 36 hours on an NVIDIA Titan Black GPU.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "A Supplemental Material",
"sec_num": null
}
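As a concrete reading of the learning-rate schedule described above, the following is a short sketch (our own; the class and variable names are not from the paper or its code). It multiplies the rate by 0.99 whenever the average of the monitored error over the most recent hundred updates exceeds all ten preceding hundred-update averages; if "greater than any" was instead meant as "greater than at least one", the max below would become a min.

```python
# Sketch of the damping rule described above (our reading, not the authors' code).
# Constants mirror the appendix: initial rate 0.1, damping factor 0.99,
# 100-update windows, compared against the previous 10 window averages.
from collections import deque

class DampedLearningRate:
    def __init__(self, lr=0.1, damp=0.99, window=100, num_prev_windows=10):
        self.lr = lr
        self.damp = damp
        self.window = window
        self.current = []                                 # values in the open window
        self.prev_averages = deque(maxlen=num_prev_windows)

    def observe(self, loss):
        """Record one batch's monitored loss/error; return the (possibly damped) rate."""
        self.current.append(loss)
        if len(self.current) == self.window:
            avg = sum(self.current) / self.window
            self.current = []
            # Damp when the newest window average exceeds every stored average.
            if self.prev_averages and avg > max(self.prev_averages):
                self.lr *= self.damp
            self.prev_averages.append(avg)
        return self.lr
```

Inside the training loop, observe would be called once per batch with that batch's monitored value, and the returned rate used for the next SGD-with-momentum update.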
],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "TensorFlow: Large-scale machine learning on heterogeneous systems",
"authors": [
{
"first": "Mart\u00edn",
"middle": [],
"last": "Abadi",
"suffix": ""
},
{
"first": "Ashish",
"middle": [],
"last": "Agarwal",
"suffix": ""
},
{
"first": "Paul",
"middle": [],
"last": "Barham",
"suffix": ""
},
{
"first": "Eugene",
"middle": [],
"last": "Brevdo",
"suffix": ""
},
{
"first": "Zhifeng",
"middle": [],
"last": "Chen",
"suffix": ""
},
{
"first": "Craig",
"middle": [],
"last": "Citro",
"suffix": ""
},
{
"first": "Greg",
"middle": [
"S"
],
"last": "Corrado",
"suffix": ""
},
{
"first": "Andy",
"middle": [],
"last": "Davis",
"suffix": ""
},
{
"first": "Jeffrey",
"middle": [],
"last": "Dean",
"suffix": ""
},
{
"first": "Matthieu",
"middle": [],
"last": "Devin",
"suffix": ""
},
{
"first": "Sanjay",
"middle": [],
"last": "Ghemawat",
"suffix": ""
},
{
"first": "Ian",
"middle": [],
"last": "Goodfellow",
"suffix": ""
},
{
"first": "Andrew",
"middle": [],
"last": "Harp",
"suffix": ""
},
{
"first": "Geoffrey",
"middle": [],
"last": "Irving",
"suffix": ""
},
{
"first": "Michael",
"middle": [],
"last": "Isard",
"suffix": ""
},
{
"first": "Yangqing",
"middle": [],
"last": "Jia",
"suffix": ""
},
{
"first": "Rafal",
"middle": [],
"last": "Jozefowicz",
"suffix": ""
},
{
"first": "Lukasz",
"middle": [],
"last": "Kaiser",
"suffix": ""
},
{
"first": "Manjunath",
"middle": [],
"last": "Kudlur",
"suffix": ""
},
{
"first": "Josh",
"middle": [],
"last": "Levenberg",
"suffix": ""
}
],
"year": 2015,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mart\u00edn Abadi, Ashish Agarwal, Paul Barham, Eugene Brevdo, Zhifeng Chen, Craig Citro, Greg S. Cor- rado, Andy Davis, Jeffrey Dean, Matthieu Devin, Sanjay Ghemawat, Ian Goodfellow, Andrew Harp, Geoffrey Irving, Michael Isard, Yangqing Jia, Rafal Jozefowicz, Lukasz Kaiser, Manjunath Kudlur, Josh Levenberg, Dan Man\u00e9, Rajat Monga, Sherry Moore, Derek Murray, Chris Olah, Mike Schuster, Jonathon Shlens, Benoit Steiner, Ilya Sutskever, Kunal Tal- war, Paul Tucker, Vincent Vanhoucke, Vijay Vasude- van, Fernanda Vi\u00e9gas, Oriol Vinyals, Pete Warden, Martin Wattenberg, Martin Wicke, Yuan Yu, and Xi- aoqiang Zheng. 2015. TensorFlow: Large-scale machine learning on heterogeneous systems. Soft- ware available from tensorflow.org.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Neural machine translation by jointly learning to align and translate",
"authors": [
{
"first": "Dzmitry",
"middle": [],
"last": "Bahdanau",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 2015 International Conference on Learning Representations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Dzmitry Bahdanau, KyungHyun Cho, and Yoshua Bengio. 2015. Neural machine translation by jointly learning to align and translate. In Proceed- ings of the 2015 International Conference on Learn- ing Representations (ICLR 2015).",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Generating coherent event schemas at scale",
"authors": [
{
"first": "Niranjan",
"middle": [],
"last": "Balasubramanian",
"suffix": ""
},
{
"first": "Stephen",
"middle": [],
"last": "Soderland",
"suffix": ""
},
{
"first": "Mausam",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "Oren",
"middle": [],
"last": "Etzioni",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Niranjan Balasubramanian, Stephen Soderland, Mausam, and Oren Etzioni. 2013. Generating coherent event schemas at scale. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP-2013).",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Unsupervised learning of narrative event chains",
"authors": [
{
"first": "Nathanael",
"middle": [],
"last": "Chambers",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Jurafsky",
"suffix": ""
}
],
"year": 2008,
"venue": "Proceedings of the 46th Annual Meeting of the Association for Computational Linguistics (ACL-08)",
"volume": "",
"issue": "",
"pages": "789--797",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nathanael Chambers and Daniel Jurafsky. 2008. Un- supervised learning of narrative event chains. In Proceedings of the 46th Annual Meeting of the As- sociation for Computational Linguistics (ACL-08), pages 789-797.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Unsupervised learning of narrative schemas and their participants",
"authors": [
{
"first": "Nathanael",
"middle": [],
"last": "Chambers",
"suffix": ""
},
{
"first": "Dan",
"middle": [],
"last": "Jurafsky",
"suffix": ""
}
],
"year": 2009,
"venue": "Proceedings of the 47th Annual Meeting of the Association for Computational Linguistics (ACL-09)",
"volume": "",
"issue": "",
"pages": "602--610",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nathanael Chambers and Dan Jurafsky. 2009. Unsu- pervised learning of narrative schemas and their par- ticipants. In Proceedings of the 47th Annual Meet- ing of the Association for Computational Linguistics (ACL-09), pages 602-610.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Event schema induction with a probabilistic entity-driven model",
"authors": [
{
"first": "Nathanael",
"middle": [],
"last": "Chambers",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Nathanael Chambers. 2013. Event schema induc- tion with a probabilistic entity-driven model. In Proceedings of the 2013 Conference on Empirical Methods in Natural Language Processing (EMNLP- 2013).",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Probabilistic frame induction",
"authors": [
{
"first": "Jackie Chi Kit",
"middle": [],
"last": "Cheung",
"suffix": ""
},
{
"first": "Hoifung",
"middle": [],
"last": "Poon",
"suffix": ""
},
{
"first": "Lucy",
"middle": [],
"last": "Vanderwende",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 2013 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-13)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jackie Chi Kit Cheung, Hoifung Poon, and Lucy Van- derwende. 2013. Probabilistic frame induction. In Proceedings of the 2013 Conference of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies (NAACL-13).",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Empirical evaluation of gated recurrent neural networks on sequence modeling",
"authors": [
{
"first": "Junyoung",
"middle": [],
"last": "Chung",
"suffix": ""
},
{
"first": "Caglar",
"middle": [],
"last": "Gulcehre",
"suffix": ""
},
{
"first": "Kyunghyun",
"middle": [],
"last": "Cho",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
}
],
"year": 2014,
"venue": "NIPS Deep Learning Workshop",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Junyoung Chung, Caglar Gulcehre, KyungHyun Cho, and Yoshua Bengio. 2014. Empirical evaluation of gated recurrent neural networks on sequence model- ing. In NIPS Deep Learning Workshop.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Generating typed dependency parses from phrase structure parses",
"authors": [
{
"first": "Marie-Catherine De",
"middle": [],
"last": "Marneffe",
"suffix": ""
},
{
"first": "Bill",
"middle": [],
"last": "Maccartney",
"suffix": ""
},
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
}
],
"year": 2006,
"venue": "Proceedings of the 5th International Conference on Language Resources & Evaluation (LREC-2006)",
"volume": "6",
"issue": "",
"pages": "449--454",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marie-Catherine De Marneffe, Bill MacCartney, and Christopher D. Manning. 2006. Generating typed dependency parses from phrase structure parses. In Proceedings of the 5th International Conference on Language Resources & Evaluation (LREC-2006), volume 6, pages 449-454.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Finding structure in time",
"authors": [
{
"first": "Jeffrey",
"middle": [
"L"
],
"last": "Elman",
"suffix": ""
}
],
"year": 1990,
"venue": "Cognitive Science",
"volume": "14",
"issue": "",
"pages": "179--211",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jeffrey L. Elman. 1990. Finding structure in time. Cognitive Science, 14:179-211.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "A unified Bayesian model of scripts, frames and language",
"authors": [
{
"first": "Francis",
"middle": [],
"last": "Ferraro",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Francis Ferraro and Benjamin Van Durme. 2016. A unified Bayesian model of scripts, frames and lan- guage. In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16).",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "What happens next? Event prediction using a compositional neural network model",
"authors": [
{
"first": "Mark",
"middle": [],
"last": "Granroth-Wilding",
"suffix": ""
},
{
"first": "Stephen",
"middle": [],
"last": "Clark",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Mark Granroth-Wilding and Stephen Clark. 2016. What happens next? Event prediction using a com- positional neural network model. In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16).",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Teaching machines to read and comprehend",
"authors": [
{
"first": "Karl",
"middle": [],
"last": "Moritz Hermann",
"suffix": ""
},
{
"first": "Tom\u00e1\u0161",
"middle": [],
"last": "Ko\u010disk\u1ef3",
"suffix": ""
},
{
"first": "Edward",
"middle": [],
"last": "Grefenstette",
"suffix": ""
},
{
"first": "Lasse",
"middle": [],
"last": "Espeholt",
"suffix": ""
},
{
"first": "Will",
"middle": [],
"last": "Kay",
"suffix": ""
},
{
"first": "Mustafa",
"middle": [],
"last": "Suleyman",
"suffix": ""
},
{
"first": "Phil",
"middle": [],
"last": "Blunsom",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 29th Annual Conference on Neural Information Processing Systems (NIPS-15)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karl Moritz Hermann, Tom\u00e1\u0161 Ko\u010disk\u1ef3, Edward Grefenstette, Lasse Espeholt, Will Kay, Mustafa Su- leyman, and Phil Blunsom. 2015. Teaching ma- chines to read and comprehend. In Proceedings of the 29th Annual Conference on Neural Information Processing Systems (NIPS-15).",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Long short-term memory",
"authors": [
{
"first": "Sepp",
"middle": [],
"last": "Hochreiter",
"suffix": ""
},
{
"first": "J\u00fcrgen",
"middle": [],
"last": "Schmidhuber",
"suffix": ""
}
],
"year": 1997,
"venue": "Neural Computation",
"volume": "9",
"issue": "8",
"pages": "1735--1780",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Skip n-grams and ranking functions for predicting script events",
"authors": [
{
"first": "Bram",
"middle": [],
"last": "Jans",
"suffix": ""
},
{
"first": "Steven",
"middle": [],
"last": "Bethard",
"suffix": ""
},
{
"first": "Ivan",
"middle": [],
"last": "Vuli\u0107",
"suffix": ""
},
{
"first": "Marie",
"middle": [
"Francine"
],
"last": "Moens",
"suffix": ""
}
],
"year": 2012,
"venue": "Proceedings of the 13th Conference of the European Chapter of the Association for Computational Linguistics (EACL-12)",
"volume": "",
"issue": "",
"pages": "336--344",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Bram Jans, Steven Bethard, Ivan Vuli\u0107, and Marie Francine Moens. 2012. Skip n-grams and ranking functions for predicting script events. In Proceedings of the 13th Conference of the Euro- pean Chapter of the Association for Computational Linguistics (EACL-12), pages 336-344.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "One vector is not enough: Entity-augmented distributional semantics for discourse relations",
"authors": [
{
"first": "Yangfeng",
"middle": [],
"last": "Ji",
"suffix": ""
},
{
"first": "Jacob",
"middle": [],
"last": "Eisenstein",
"suffix": ""
}
],
"year": 2015,
"venue": "Transactions of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yangfeng Ji and Jacob Eisenstein. 2015. One vector is not enough: Entity-augmented distributional se- mantics for discourse relations. Transactions of the Association for Computational Linguistics (TACL).",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Exploring the limits of language modeling",
"authors": [
{
"first": "Rafal",
"middle": [],
"last": "Jozefowicz",
"suffix": ""
},
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "Mike",
"middle": [],
"last": "Schuster",
"suffix": ""
},
{
"first": "Noam",
"middle": [],
"last": "Shazeer",
"suffix": ""
},
{
"first": "Yonghui",
"middle": [],
"last": "Wu",
"suffix": ""
}
],
"year": 2016,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {
"arXiv": [
"arXiv:1602.02410"
]
},
"num": null,
"urls": [],
"raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the limits of language modeling. arXiv preprint arXiv:1602.02410.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Raquel Urtasun, and Sanja Fidler",
"authors": [
{
"first": "Ryan",
"middle": [],
"last": "Kiros",
"suffix": ""
},
{
"first": "Yukun",
"middle": [],
"last": "Zhu",
"suffix": ""
},
{
"first": "Ruslan",
"middle": [],
"last": "Salakhutdinov",
"suffix": ""
},
{
"first": "Richard",
"middle": [
"S"
],
"last": "Zemel",
"suffix": ""
},
{
"first": "Antonio",
"middle": [],
"last": "Torralba",
"suffix": ""
},
{
"first": "Raquel",
"middle": [],
"last": "Urtasun",
"suffix": ""
},
{
"first": "Sanja",
"middle": [],
"last": "Fidler",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 29th Annual Conference on Neural Information Processing Systems (NIPS-15)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ryan Kiros, Yukun Zhu, Ruslan Salakhutdinov, Richard S. Zemel, Antonio Torralba, Raquel Urta- sun, and Sanja Fidler. 2015. Skip-thought vectors. In Proceedings of the 29th Annual Conference on Neural Information Processing Systems (NIPS-15).",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Moses: Open source toolkit for statistical machine translation",
"authors": [
{
"first": "Philipp",
"middle": [],
"last": "Koehn",
"suffix": ""
},
{
"first": "Hieu",
"middle": [],
"last": "Hoang",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Birch",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Callison-Burch",
"suffix": ""
},
{
"first": "Marcello",
"middle": [],
"last": "Federico",
"suffix": ""
},
{
"first": "Nicola",
"middle": [],
"last": "Bertoldi",
"suffix": ""
},
{
"first": "Brooke",
"middle": [],
"last": "Cowan",
"suffix": ""
},
{
"first": "Wade",
"middle": [],
"last": "Shen",
"suffix": ""
},
{
"first": "Christine",
"middle": [],
"last": "Moran",
"suffix": ""
},
{
"first": "Richard",
"middle": [],
"last": "Zens",
"suffix": ""
},
{
"first": "Chris",
"middle": [],
"last": "Dyer",
"suffix": ""
},
{
"first": "Ondrej",
"middle": [],
"last": "Bojar",
"suffix": ""
},
{
"first": "Alexandra",
"middle": [],
"last": "Constantin",
"suffix": ""
},
{
"first": "Evan",
"middle": [],
"last": "Herbst",
"suffix": ""
}
],
"year": 2007,
"venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics (ACL-07) Companion Volume: Proceedings of the Demo and Poster Sessions",
"volume": "",
"issue": "",
"pages": "177--180",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Philipp Koehn, Hieu Hoang, Alexandra Birch, Chris Callison-Burch, Marcello Federico, Nicola Bertoldi, Brooke Cowan, Wade Shen, Christine Moran, Richard Zens, Chris Dyer, Ondrej Bojar, Alexandra Constantin, and Evan Herbst. 2007. Moses: Open source toolkit for statistical machine translation. In Proceedings of the 45th Annual Meeting of the As- sociation for Computational Linguistics (ACL-07) Companion Volume: Proceedings of the Demo and Poster Sessions, pages 177-180, Prague, Czech Re- public.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Recursive deep models for discourse parsing",
"authors": [
{
"first": "Jiwei",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Rumeng",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Eduard",
"middle": [],
"last": "Hovy",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP)",
"volume": "",
"issue": "",
"pages": "2061--2069",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Jiwei Li, Rumeng Li, and Eduard Hovy. 2014. Recur- sive deep models for discourse parsing. In Proceed- ings of the 2014 Conference on Empirical Methods in Natural Language Processing (EMNLP), pages 2061-2069, October.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "Implicit discourse relation classification via multi-task neural networks",
"authors": [
{
"first": "Yang",
"middle": [],
"last": "Liu",
"suffix": ""
},
{
"first": "Sujian",
"middle": [],
"last": "Li",
"suffix": ""
},
{
"first": "Xiaodong",
"middle": [],
"last": "Zhang",
"suffix": ""
},
{
"first": "Zhifang",
"middle": [],
"last": "Sui",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yang Liu, Sujian Li, Xiaodong Zhang, and Zhifang Sui. 2016. Implicit discourse relation classification via multi-task neural networks. In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16).",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Multi-task sequence to sequence learning",
"authors": [
{
"first": "Minh-Thang",
"middle": [],
"last": "Luong",
"suffix": ""
},
{
"first": "Quoc",
"middle": [
"V"
],
"last": "Le",
"suffix": ""
},
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "Lukasz",
"middle": [],
"last": "Kaiser",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 4th International Conference on Learning Representations",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Minh-Thang Luong, Quoc V. Le, Ilya Sutskever, Oriol Vinyals, and Lukasz Kaiser. 2016. Multi-task se- quence to sequence learning. In Proceedings of the 4th International Conference on Learning Represen- tations (ICLR-16).",
"links": null
},
"BIBREF22": {
"ref_id": "b22",
"title": "Plot induction and evolutionary search for story generation",
"authors": [
{
"first": "Neil",
"middle": [],
"last": "Mcintyre",
"suffix": ""
},
{
"first": "Mirella",
"middle": [],
"last": "Lapata",
"suffix": ""
}
],
"year": 2010,
"venue": "Proceedings of the 48th Annual Meeting of the Association for Computational Linguistics (ACL-10)",
"volume": "",
"issue": "",
"pages": "1562--1572",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Neil McIntyre and Mirella Lapata. 2010. Plot induc- tion and evolutionary search for story generation. In Proceedings of the 48th Annual Meeting of the As- sociation for Computational Linguistics (ACL-10), pages 1562-1572.",
"links": null
},
"BIBREF23": {
"ref_id": "b23",
"title": "Empirical evaluation and combination of advanced language modeling techniques",
"authors": [
{
"first": "Tomas",
"middle": [],
"last": "Mikolov",
"suffix": ""
},
{
"first": "Anoop",
"middle": [],
"last": "Deoras",
"suffix": ""
},
{
"first": "Stefan",
"middle": [],
"last": "Kombrink",
"suffix": ""
},
{
"first": "Lukas",
"middle": [],
"last": "Burget",
"suffix": ""
},
{
"first": "Jan",
"middle": [],
"last": "Cernock\u1ef3",
"suffix": ""
}
],
"year": 2011,
"venue": "Proceedings of the 12th Annual Conference of the International Speech Communication Association",
"volume": "",
"issue": "",
"pages": "605--608",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Tomas Mikolov, Anoop Deoras, Stefan Kombrink, Lukas Burget, and Jan Cernock\u1ef3. 2011. Empir- ical evaluation and combination of advanced lan- guage modeling techniques. In Proceedings of the 12th Annual Conference of the International Speech Communication Association 2011 (INTERSPEECH 2011), pages 605-608.",
"links": null
},
"BIBREF24": {
"ref_id": "b24",
"title": "A framework for representing knowledge",
"authors": [
{
"first": "Marvin",
"middle": [],
"last": "Minsky",
"suffix": ""
}
],
"year": 1974,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Marvin Minsky. 1974. A framework for representing knowledge. Technical report, MIT-AI Laboratory.",
"links": null
},
"BIBREF25": {
"ref_id": "b25",
"title": "Inducing neural models of script knowledge",
"authors": [
{
"first": "Ashutosh",
"middle": [],
"last": "Modi",
"suffix": ""
},
{
"first": "Ivan",
"middle": [],
"last": "Titov",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the Eighteenth Conference on Computational Natural Language Learning (CoNLL-2014)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ashutosh Modi and Ivan Titov. 2014. Inducing neu- ral models of script knowledge. In Proceedings of the Eighteenth Conference on Computational Nat- ural Language Learning (CoNLL-2014), Baltimore, MD, USA.",
"links": null
},
"BIBREF26": {
"ref_id": "b26",
"title": "Learning scripts as Hidden Markov Models",
"authors": [
{
"first": "J Walker",
"middle": [],
"last": "Orr",
"suffix": ""
},
{
"first": "Prasad",
"middle": [],
"last": "Tadepalli",
"suffix": ""
},
{
"first": "Janardhan",
"middle": [],
"last": "Rao Doppa",
"suffix": ""
},
{
"first": "Xiaoli",
"middle": [],
"last": "Fern",
"suffix": ""
},
{
"first": "Thomas",
"middle": [
"G"
],
"last": "Dietterich",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 28th AAAI Conference on Artificial Intelligence (AAAI-14)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J Walker Orr, Prasad Tadepalli, Janardhan Rao Doppa, Xiaoli Fern, and Thomas G Dietterich. 2014. Learning scripts as Hidden Markov Models. In Pro- ceedings of the 28th AAAI Conference on Artificial Intelligence (AAAI-14).",
"links": null
},
"BIBREF27": {
"ref_id": "b27",
"title": "BLEU: a method for automatic evaluation of machine translation",
"authors": [
{
"first": "Kishore",
"middle": [],
"last": "Papineni",
"suffix": ""
},
{
"first": "Salim",
"middle": [],
"last": "Roukos",
"suffix": ""
},
{
"first": "Todd",
"middle": [],
"last": "Ward",
"suffix": ""
},
{
"first": "Wei-Jing",
"middle": [],
"last": "Zhu",
"suffix": ""
}
],
"year": 2002,
"venue": "Proceedings of the 40th Annual Meeting of the Association for Computational Linguistics (ACL-02)",
"volume": "",
"issue": "",
"pages": "311--318",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. BLEU: a method for automatic evaluation of machine translation. In Proceedings of the 40th Annual Meeting of the Association for Com- putational Linguistics (ACL-02), pages 311-318.",
"links": null
},
"BIBREF28": {
"ref_id": "b28",
"title": "Statistical script learning with multi-argument events",
"authors": [
{
"first": "Karl",
"middle": [],
"last": "Pichotta",
"suffix": ""
},
{
"first": "Raymond",
"middle": [
"J"
],
"last": "Mooney",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 14th Conference of the European Chapter of the Association for Computational Linguistics (EACL 2014)",
"volume": "",
"issue": "",
"pages": "220--229",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karl Pichotta and Raymond J. Mooney. 2014. Statis- tical script learning with multi-argument events. In Proceedings of the 14th Conference of the European Chapter of the Association for Computational Lin- guistics (EACL 2014), pages 220-229.",
"links": null
},
"BIBREF29": {
"ref_id": "b29",
"title": "Learning statistical scripts with LSTM recurrent neural networks",
"authors": [
{
"first": "Karl",
"middle": [],
"last": "Pichotta",
"suffix": ""
},
{
"first": "Raymond",
"middle": [
"J"
],
"last": "Mooney",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Karl Pichotta and Raymond J. Mooney. 2016. Learn- ing statistical scripts with LSTM recurrent neural networks. In Proceedings of the 30th AAAI Con- ference on Artificial Intelligence (AAAI-16).",
"links": null
},
"BIBREF30": {
"ref_id": "b30",
"title": "Script induction as language modeling",
"authors": [
{
"first": "Rachel",
"middle": [],
"last": "Rudinger",
"suffix": ""
},
{
"first": "Pushpendre",
"middle": [],
"last": "Rastogi",
"suffix": ""
},
{
"first": "Francis",
"middle": [],
"last": "Ferraro",
"suffix": ""
},
{
"first": "Benjamin",
"middle": [],
"last": "Van Durme",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Rachel Rudinger, Pushpendre Rastogi, Francis Ferraro, and Benjamin Van Durme. 2015. Script induction as language modeling. In Proceedings of the 2015 Conference on Empirical Methods in Natural Lan- guage Processing (EMNLP-15).",
"links": null
},
"BIBREF31": {
"ref_id": "b31",
"title": "Scripts, Plans, Goals and Understanding: An Inquiry into Human Knowledge Structures",
"authors": [
{
"first": "Roger",
"middle": [
"C"
],
"last": "Schank",
"suffix": ""
},
{
"first": "Robert",
"middle": [
"P"
],
"last": "Abelson",
"suffix": ""
}
],
"year": 1977,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Roger C. Schank and Robert P. Abelson. 1977. Scripts, Plans, Goals and Understanding: An Inquiry into Human Knowledge Structures. Lawrence Erlbaum and Associates.",
"links": null
},
"BIBREF32": {
"ref_id": "b32",
"title": "Building end-to-end dialogue systems using generative hierarchical neural network models",
"authors": [
{
"first": "Iulian",
"middle": [
"V"
],
"last": "Serban",
"suffix": ""
},
{
"first": "Alessandro",
"middle": [],
"last": "Sordoni",
"suffix": ""
},
{
"first": "Yoshua",
"middle": [],
"last": "Bengio",
"suffix": ""
},
{
"first": "Aaron",
"middle": [],
"last": "Courville",
"suffix": ""
},
{
"first": "Joelle",
"middle": [],
"last": "Pineau",
"suffix": ""
}
],
"year": 2016,
"venue": "Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Iulian V. Serban, Alessandro Sordoni, Yoshua Bengio, Aaron Courville, and Joelle Pineau. 2016. Building end-to-end dialogue systems using generative hier- archical neural network models. In Proceedings of the 30th AAAI Conference on Artificial Intelligence (AAAI-16).",
"links": null
},
"BIBREF33": {
"ref_id": "b33",
"title": "Parsing with compositional vector grammars",
"authors": [
{
"first": "Richard",
"middle": [],
"last": "Socher",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Bauer",
"suffix": ""
},
{
"first": "Christopher",
"middle": [
"D"
],
"last": "Manning",
"suffix": ""
},
{
"first": "Andrew",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
}
],
"year": 2013,
"venue": "Proceedings of the 51st Annual Meeting of the Association for Computational Linguistics (ACL-13)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Richard Socher, John Bauer, Christopher D. Manning, and Andrew Y. Ng. 2013. Parsing with compo- sitional vector grammars. In Proceedings of the 51st Annual Meeting of the Association for Compu- tational Linguistics (ACL-13).",
"links": null
},
"BIBREF34": {
"ref_id": "b34",
"title": "Sequence to sequence learning with neural networks",
"authors": [
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "Quoc",
"middle": [
"V"
],
"last": "Le",
"suffix": ""
}
],
"year": 2014,
"venue": "Proceedings of the 28th Annual Conference on Neural Information Processing Systems (NIPS-14)",
"volume": "",
"issue": "",
"pages": "3104--3112",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Ilya Sutskever, Oriol Vinyals, and Quoc V. Le. 2014. Sequence to sequence learning with neural net- works. In Proceedings of the 28th Annual Con- ference on Neural Information Processing Systems (NIPS-14), pages 3104-3112.",
"links": null
},
"BIBREF35": {
"ref_id": "b35",
"title": "Grammar as a foreign language",
"authors": [
{
"first": "Oriol",
"middle": [],
"last": "Vinyals",
"suffix": ""
},
{
"first": "\u0141ukasz",
"middle": [],
"last": "Kaiser",
"suffix": ""
},
{
"first": "Terry",
"middle": [],
"last": "Koo",
"suffix": ""
},
{
"first": "Slav",
"middle": [],
"last": "Petrov",
"suffix": ""
},
{
"first": "Ilya",
"middle": [],
"last": "Sutskever",
"suffix": ""
},
{
"first": "Geoffrey",
"middle": [],
"last": "Hinton",
"suffix": ""
}
],
"year": 2015,
"venue": "Proceedings of the 29th Annual Conference on Neural Information Processing Systems (NIPS-15)",
"volume": "",
"issue": "",
"pages": "2755--2763",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Oriol Vinyals, \u0141ukasz Kaiser, Terry Koo, Slav Petrov, Ilya Sutskever, and Geoffrey Hinton. 2015. Gram- mar as a foreign language. In Proceedings of the 29th Annual Conference on Neural Information Pro- cessing Systems (NIPS-15), pages 2755-2763.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"type_str": "figure",
"text": ", Jans et al. (2012), and Rudinger et al. (2015) evaluate inference of heldout (verb, dependency) pairs from documents; Pichotta and Mooney (2014) evaluate inference of verbs with coreference information about multiple arguments; and Pichotta and Mooney",
"uris": null,
"num": null
}
}
}
} |