{
"paper_id": "D07-1013",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T16:19:03.582157Z"
},
"title": "Characterizing the Errors of Data-Driven Dependency Parsing Models",
"authors": [
{
"first": "Ryan",
"middle": [],
"last": "Mcdonald",
"suffix": "",
"affiliation": {},
"email": ""
},
{
"first": "Joakim",
"middle": [],
"last": "Nivre",
"suffix": "",
"affiliation": {},
"email": "nivre@msi.vxu.se"
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "We present a comparative error analysis of the two dominant approaches in data-driven dependency parsing: global, exhaustive, graph-based models, and local, greedy, transition-based models. We show that, in spite of similar performance overall, the two models produce different types of errors, in a way that can be explained by theoretical properties of the two models. This analysis leads to new directions for parser development.",
"pdf_parse": {
"paper_id": "D07-1013",
"_pdf_hash": "",
"abstract": [
{
"text": "We present a comparative error analysis of the two dominant approaches in data-driven dependency parsing: global, exhaustive, graph-based models, and local, greedy, transition-based models. We show that, in spite of similar performance overall, the two models produce different types of errors, in a way that can be explained by theoretical properties of the two models. This analysis leads to new directions for parser development.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "Syntactic dependency representations have a long history in descriptive and theoretical linguistics and many formal models have been advanced (Hudson, 1984; Mel'\u010duk, 1988; Sgall et al., 1986; Maruyama, 1990) . A dependency graph of a sentence represents each word and its syntactic modifiers through labeled directed arcs, as shown in Figure 1 , taken from the Prague Dependency Treebank (B\u00f6hmov\u00e1 et al., 2003) . A primary advantage of dependency representations is that they have a natural mechanism for representing discontinuous constructions, arising from long distance dependencies or free word order, through non-projective dependency arcs, exemplified by the arc from jedna to Z in Figure 1 .",
"cite_spans": [
{
"start": 142,
"end": 156,
"text": "(Hudson, 1984;",
"ref_id": "BIBREF4"
},
{
"start": 157,
"end": 171,
"text": "Mel'\u010duk, 1988;",
"ref_id": "BIBREF9"
},
{
"start": 172,
"end": 191,
"text": "Sgall et al., 1986;",
"ref_id": "BIBREF15"
},
{
"start": 192,
"end": 207,
"text": "Maruyama, 1990)",
"ref_id": "BIBREF5"
},
{
"start": 388,
"end": 410,
"text": "(B\u00f6hmov\u00e1 et al., 2003)",
"ref_id": "BIBREF0"
}
],
"ref_spans": [
{
"start": 335,
"end": 343,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 689,
"end": 697,
"text": "Figure 1",
"ref_id": "FIGREF0"
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Syntactic dependency graphs have recently gained a wide interest in the computational linguistics community and have been successfully employed for many problems ranging from machine translation (Ding and Palmer, 2004) to ontology construction (Snow et al., 2004) . In this work we focus on a common parsing paradigm called datadriven dependency parsing. Unlike grammar-based parsing, data-driven approaches learn to produce dependency graphs for sentences solely from an annotated corpus. The advantage of such models is that they are easily ported to any domain or language in which annotated resources exist.",
"cite_spans": [
{
"start": 195,
"end": 218,
"text": "(Ding and Palmer, 2004)",
"ref_id": "BIBREF3"
},
{
"start": 244,
"end": 263,
"text": "(Snow et al., 2004)",
"ref_id": "BIBREF16"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "As evident from the CoNLL-X shared task on dependency parsing (Buchholz and Marsi, 2006) , there are currently two dominant models for data-driven dependency parsing. The first is what Buchholz and Marsi (2006) call the \"all-pairs\" approach, where every possible arc is considered in the construction of the optimal parse. The second is the \"stepwise\" approach, where the optimal parse is built stepwise and where the subset of possible arcs considered depend on previous decisions. Theoretically, these models are extremely different. The all-pairs models are globally trained, use exact (or near exact) inference algorithms, and define features over a limited history of parsing decisions. The stepwise models use local training and greedy inference algorithms, but define features over a rich history of parse decisions. However, both models obtain similar parsing accuracies Table 1 : Labeled parsing accuracy for top scoring systems at CoNLL-X (Buchholz and Marsi, 2006) . on a variety of languages, as seen in Table 1 , which shows results for the two top performing systems in the CoNLL-X shared task, McDonald et al. (2006) (\"all-pairs\") and (\"stepwise\") .",
"cite_spans": [
{
"start": 62,
"end": 88,
"text": "(Buchholz and Marsi, 2006)",
"ref_id": "BIBREF1"
},
{
"start": 185,
"end": 210,
"text": "Buchholz and Marsi (2006)",
"ref_id": "BIBREF1"
},
{
"start": 949,
"end": 975,
"text": "(Buchholz and Marsi, 2006)",
"ref_id": "BIBREF1"
},
{
"start": 1109,
"end": 1131,
"text": "McDonald et al. (2006)",
"ref_id": "BIBREF8"
},
{
"start": 1150,
"end": 1162,
"text": "(\"stepwise\")",
"ref_id": null
}
],
"ref_spans": [
{
"start": 879,
"end": 886,
"text": "Table 1",
"ref_id": null
},
{
"start": 1016,
"end": 1023,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Despite the similar performance in terms of overall accuracy, there are indications that the two types of models exhibit different behaviour. For example, Sagae and Lavie (2006) displayed that combining the predictions of both parsing models can lead to significantly improved accuracies. In order to pave the way for new and better methods, a much more detailed error analysis is needed to understand the strengths and weaknesses of different approaches. In this work we set out to do just that, focusing on the two top performing systems from the CoNLL-X shared task as representatives of the two dominant models in data-driven dependency parsing.",
"cite_spans": [
{
"start": 155,
"end": 177,
"text": "Sagae and Lavie (2006)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Let L = {l 1 , . . . , l |L| } be a set of permissible arc labels. Let x = w 0 , w 1 , . . . , w n be an input sentence where w 0 =root. Formally, a dependency graph for an input sentence x is a labeled directed graph G = (V, A) consisting of a set of nodes V and a set of labeled directed arcs A \u2286 V \u00d7 V \u00d7 L, i.e., if (i, j, l) \u2208 A for i, j \u2208 V and l \u2208 L, then there is an arc from node i to node j with label l in the graph. A dependency graph G for sentence x must satisfy the following properties:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "2.1"
},
{
"text": "1. V = {0, 1, . . . , n} 2. If (i, j, l) \u2208 A, then j = 0. 3. If (i, j, l) \u2208 A, then for all i \u2208 V \u2212 {i} and l \u2208 L, (i , j, l ) / \u2208 A.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "2.1"
},
{
"text": "4. For all j \u2208 V \u2212 {0}, there is a (possibly empty) sequence of nodes i 1 , . . . , i m \u2208V and labels",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "2.1"
},
{
"text": "l 1 , . . . , l m , l\u2208L such that (0, i 1 , l 1 ),(i 1 , i 2 , l 2 ), . . . , (i m , j, l)\u2208A.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "2.1"
},
{
"text": "The constraints state that the dependency graph spans the entire input (1); that the node 0 is a root (2); that each node has at most one incoming arc in the graph (3); and that the graph is connected through directed paths from the node 0 to every other node in the graph (4). A dependency graph satisfying these constraints is a directed tree originating out of the root node 0. We say that an arc (i, j, l) is nonprojective if not all words k occurring between i and j in the linear order are dominated by i (where dominance is the transitive closure of the arc relation).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Preliminaries",
"sec_num": "2.1"
},
{
"text": "For an input sentence, x = w 0 , w 1 , . . . , w n consider the dense graph",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "G x = (V x , A x )",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "where:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "1. V x = {0, 1, . . . , n} 2. A x = {(i, j, l) | \u2200 i, j \u2208 V x and l \u2208 L} Let D(G x )",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "represent the subgraphs of graph G x that are valid dependency graphs for the sentence x. Since G x contains all possible labeled arcs, the set D(G x ) must necessarily contain all valid dependency graphs for x.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "Assume that there exists a dependency arc scoring function, s : V \u00d7 V \u00d7 L \u2192 R. Furthermore, define the score of a graph as the sum of its arc scores,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "s(G = (V, A)) = (i,j,l)\u2208A s(i, j, l)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "The score of a dependency arc, s(i, j, l) represents the likelihood of creating a dependency from word w i to word w j with the label l. If the arc score function is known a priori, then the parsing problem can be stated as,",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "G = arg max G\u2208D(Gx) s(G) = arg max G\u2208D(Gx) (i,j,l)\u2208A s(i, j, l)",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "This problem is equivalent to finding the highest scoring directed spanning tree in the graph G x originating out of the root node 0, which can be solved for both the labeled and unlabeled case in O(n 2 ) time (McDonald et al., 2005b) . In this approach, nonprojective arcs are produced naturally through the inference algorithm that searches over all possible directed trees, whether projective or not.",
"cite_spans": [
{
"start": 210,
"end": 234,
"text": "(McDonald et al., 2005b)",
"ref_id": "BIBREF7"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "The parsing models of McDonald work primarily in this framework. To learn arc scores, these models use large-margin structured learning algorithms (McDonald et al., 2005a) , which optimize the parameters of the model to maximize the score margin between the correct dependency graph and all incorrect dependency graphs for every sentence in a training set. The learning procedure is global since model parameters are set relative to the classification of the entire dependency graph, and not just over single arc attachment decisions. The primary disadvantage of these models is that the feature representation is restricted to a limited number of graph arcs. This restriction is required so that both inference and learning are tractable.",
"cite_spans": [
{
"start": 147,
"end": 171,
"text": "(McDonald et al., 2005a)",
"ref_id": "BIBREF6"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "The specific model studied in this work is that presented by McDonald et al. (2006) , which factors scores over pairs of arcs (instead of just single arcs) and uses near exhaustive search for unlabeled parsing coupled with a separate classifier to label each arc. We call this system MSTParser, which is also the name of the freely available implementation. 1",
"cite_spans": [
{
"start": 61,
"end": 83,
"text": "McDonald et al. (2006)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Global, Exhaustive, Graph-Based Parsing",
"sec_num": "2.2"
},
{
"text": "A transition system for dependency parsing defines 1. a set C of parser configurations, each of which defines a (partially built) dependency graph G 2. a set T of transitions, each a function t : C \u2192 C 3. for every sentence",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "EQUATION",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [
{
"start": 0,
"end": 8,
"text": "EQUATION",
"ref_id": "EQREF",
"raw_str": "x = w 0 , w 1 , . . . , w n ,",
"eq_num": "(a)"
}
],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "a unique initial configuration c x (b) a set C x of terminal configurations A transition sequence C x,m = (c x , c 1 , . . . , c m ) for a sentence x is a sequence of configurations such that c m \u2208 C x and, for every c i (c i = c x ), there is a transition t \u2208 T such that c i = t(c i\u22121 ). The dependency graph assigned to x by C x,m is the graph G m defined by the terminal configuration c m . Assume that there exists a transition scoring function, s : C \u00d7 T \u2192 R. The score of a transition t in a configuration c, s(c, t), represents the likelihood of taking transition t out of configuration c. The parsing problem consists in finding a terminal configuration c m \u2208 C x , starting from the initial configuration c x and taking the optimal transition t * = arg max t\u2208T s(c, t) out of every configuration c. This can be seen as a greedy search for the optimal dependency graph, based on a sequence of locally optimal decisions in terms of the transition system.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "Many transition systems for data-driven dependency parsing are inspired by shift-reduce parsing, where configurations contain a stack for storing partially processed nodes. Transitions in such systems add arcs to the dependency graph and/or manipulate the stack. One example is the transition system defined by Nivre (2003) , which parses a sentence x = w 0 , w 1 , . . . , w n in O(n) time, producing a projective dependency graph satisfying conditions 1-4 in section 2.1, possibly after adding arcs (0, i, l r ) for every node i = 0 that is a root in the output graph (where l r is a special label for root modifiers). Nivre and Nilsson (2005) showed how the restriction to projective dependency graphs could be lifted by using graph transformation techniques to preprocess training data and post-process parser output, so-called pseudo-projective parsing.",
"cite_spans": [
{
"start": 311,
"end": 323,
"text": "Nivre (2003)",
"ref_id": "BIBREF12"
},
{
"start": 621,
"end": 645,
"text": "Nivre and Nilsson (2005)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "To learn transition scores, these systems use discriminative learning methods, e.g., memory-based learning or support vector machines. The learning procedure is local since only single transitions are scored, not entire transition sequences. The primary advantage of these models is that features are not restricted to a limited number of graph arcs but can take into account the entire dependency graph built so far. The main disadvantage is that the greedy parsing strategy may lead to error propagation.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "The specific model studied in this work is that presented by , which uses labeled pseudo-projective parsing with support vector machines. We call this system MaltParser, which is also the name of the freely available implementation. 2",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Local, Greedy, Transition-Based Parsing",
"sec_num": "2.3"
},
{
"text": "These models differ primarily with respect to three important properties.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison",
"sec_num": "2.4"
},
{
"text": "1. Inference: MaltParser uses a transition-based inference algorithm that greedily chooses the best parsing decision based on a trained classifier and current parser history. MSTParser instead uses near exhaustive search over a dense graphical representation of the sentence to find the dependency graph that maximizes the score.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison",
"sec_num": "2.4"
},
{
"text": "2. Training: MaltParser trains a model to make a single classification decision (choose the next transition). MSTParser trains a model to maximize the global score of correct graphs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison",
"sec_num": "2.4"
},
{
"text": "3. Feature Representation: MaltParser can introduce a rich feature history based on previous parser decisions. MSTParser is forced to restrict the score of features to a single or pair of nearby parsing decisions in order to make exhaustive inference tractable.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison",
"sec_num": "2.4"
},
{
"text": "These differences highlight an inherent trade-off between exhaustive inference algorithms plus global learning and expressiveness of feature representations. MSTParser favors the former at the expense of the latter and MaltParser the opposite.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Comparison",
"sec_num": "2.4"
},
{
"text": "The CoNLL-X shared task (Buchholz and Marsi, 2006 ) was a large-scale evaluation of data-driven dependency parsers, with data from 13 different languages and 19 participating systems. The official evaluation metric was the labeled attachment score (LAS), defined as the percentage of tokens, excluding punctuation, that are assigned both the correct head and the correct dependency label. 3 The output of all systems that participated in the shared task are available for download and constitute a rich resource for comparative error analysis.",
"cite_spans": [
{
"start": 24,
"end": 49,
"text": "(Buchholz and Marsi, 2006",
"ref_id": "BIBREF1"
},
{
"start": 389,
"end": 390,
"text": "3",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "The CoNLL-X Shared Task",
"sec_num": "3"
},
{
"text": "The data used in the experiments below are the outputs of MSTParser and MaltParser for all 13 languages, together with the corresponding gold standard graphs used in the evaluation. We constructed the data by simply concatenating a system's output for every language. This resulted in a single output file for each system and a corresponding single gold standard file. This method is sound because the data sets for each language contain approximately the same number of tokens -5,000. Thus, evaluating system performance over the aggregated files can be roughly viewed as measuring system performance through an equally weighted arithmetic mean over the languages.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "The CoNLL-X Shared Task",
"sec_num": "3"
},
{
"text": "It could be argued that a language by language comparison would be more appropriate than comparing system performance across all languages. However, as table Table 1 shows, the difference in accuracy between the two systems is typically small for all languages, and only in a few cases is this difference significant. Furthermore, by aggregating over all languages we gain better statistical estimates of parser errors, since the data set for each individual language is very small.",
"cite_spans": [],
"ref_spans": [
{
"start": 152,
"end": 165,
"text": "table Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "The CoNLL-X Shared Task",
"sec_num": "3"
},
{
"text": "The primary purpose of this study is to characterize the errors made by standard data-driven dependency parsing models. To that end, we present a large set of experiments that relate parsing errors to a set of linguistic and structural properties of the input and predicted/gold standard dependency graphs. We argue that the results can be correlated to specific theoretical aspects of each model -in particular the trade-off highlighted in Section 2.4.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Error Analysis",
"sec_num": "4"
},
{
"text": "For simplicity, all experiments report labeled parsing accuracies. Identical experiments using unlabeled parsing accuracies did not reveal any additional information. Furthermore, all experiments are based on the data from all 13 languages together, as explained in section 3.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Error Analysis",
"sec_num": "4"
},
{
"text": "It is well known that parsing systems tend to have lower accuracies for longer sentences. Figure 2 shows the accuracy of both parsing models relative to sentence length (in bins of size 10: 1-10, 11-20, etc.). System performance is almost indistinguishable. However, MaltParser tends to perform better on shorter sentences, which require the greedy inference algorithm to make less parsing decisions. As a result, the chance of error propagation is reduced significantly when parsing these sentences. The fact that MaltParser has a higher accuracy (rather than the same accuracy) when the likelihood of error propagation is reduced comes from its richer feature representation.",
"cite_spans": [],
"ref_spans": [
{
"start": 90,
"end": 98,
"text": "Figure 2",
"ref_id": "FIGREF1"
}
],
"eq_spans": [],
"section": "Length Factors",
"sec_num": "4.1"
},
{
"text": "Another interesting property is accuracy relative to dependency length. The length of a dependency from word w i to word w j is simply equal to |i \u2212 j|. Longer dependencies typically represent modifiers of the root or the main verb in a sentence. Shorter dependencies are often modifiers of nouns such as determiners or adjectives or pronouns modifying their direct neighbours. Figure 3 measures the precision and recall for each system relative to dependency lengths in the predicted and gold standard dependency graphs. Precision represents the percentage of predicted arcs of length d that were correct. Recall measures the percentage of gold standard arcs of length d that were correctly predicted.",
"cite_spans": [],
"ref_spans": [
{
"start": 378,
"end": 386,
"text": "Figure 3",
"ref_id": "FIGREF2"
}
],
"eq_spans": [],
"section": "Length Factors",
"sec_num": "4.1"
},
{
"text": "Here we begin to see separation between the two systems. MSTParser is far more precise for longer dependency arcs, whereas MaltParser does better for shorter dependency arcs. This behaviour can be explained using the same reasoning as above: shorter arcs are created before longer arcs in the greedy parsing procedure of MaltParser and are less prone to error propagation. Theoretically, MST-Parser should not perform better or worse for edges of any length, which appears to be the case. There is still a slight degradation, but this can be attributed to long dependencies occurring more frequently in constructions with possible ambiguity. Note that even though the area under the curve is much larger for MSTParser, the number of dependency arcs with a length greater than ten is much smaller than the number with length less than ten, which is why the overall accuracy of each system is nearly identical. For all properties considered here, bin size generally shrinks in size as the value on the x-axis increases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Length Factors",
"sec_num": "4.1"
},
{
"text": "The structure of the predicted and gold standard dependency graphs can also provide insight into the differences between each model. For example, measuring accuracy for arcs relative to their distance to the artificial root node will detail errors at different levels of the dependency graph. For a given arc, we define this distance as the number of arcs in the reverse path from the modifier of the arc to the root. Figure 4 plots the precision and recall of each system for arcs of varying distance to the root. Precision is equal to the percentage of dependency arcs in the predicted graph that are at a distance of d and are correct. Recall is the percentage of dependency arcs in the gold standard graph that are at a distance of d and were predicted. Figure 4 clearly shows that for arcs close to the root, MSTParser is much more precise than Malt-Parser, and vice-versa for arcs further away from the root. This is probably the most compelling graph given in this study since it reveals a clear distinction: MSTParser's precision degrades as the distance to the root increases whereas MaltParser's precision increases. The plots essentially run in opposite directions crossing near the middle. Dependency arcs further away from the root are usually constructed early in the parsing algorithm of MaltParser. Again a reduced likelihood of error propagation coupled with a rich feature representation benefits that parser substantially. Furthermore, MaltParser tends to overpredict root modifiers, because all words that the parser fails to attach as modifiers are automatically connected to the root, as explained in section 2.3. Hence, low precision for root modifiers (without a corresponding drop in recall) is an indication that the transition-based parser produces fragmented parses.",
"cite_spans": [],
"ref_spans": [
{
"start": 418,
"end": 426,
"text": "Figure 4",
"ref_id": null
},
{
"start": 758,
"end": 766,
"text": "Figure 4",
"ref_id": null
}
],
"eq_spans": [],
"section": "Graph Factors",
"sec_num": "4.2"
},
{
"text": "The behaviour of MSTParser is a little trickier to explain. One would expect that its errors should be distributed evenly over the graph. For the most part this is true, with the exception of spikes at the ends of the plot. The high performance for root modification (distance of 1) can be explained through the fact that this is typically a low entropy decision -usually the parsing algorithm has to determine the main verb from a small set of possibilities. On the other end of the plot there is a sharp downwards spike for arcs of distance greater than 10. It turns out that MSTParser over-predicts arcs near the bottom of the graph. Whereas MaltParser pushes difficult parsing decisions higher in the graph, MSTParser appears to push these decisions lower.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Graph Factors",
"sec_num": "4.2"
},
{
"text": "The next graph property we will examine aims to quantify the local neighbourhood of an arc within a dependency graph. Two dependency arcs, (i, j, l) and (i , j , l ) are classified as siblings if they represent syntactic modifications of the same word, i.e., i = i . Figure 5 measures the precision and recall of each system relative to the number of predicted and gold standard siblings of each arc. There is not much to distinguish between the parsers on this metric. MSTParser is slightly more precise for arcs that are predicted with more siblings, whereas Malt-Parser has slightly higher recall on arcs that have more siblings in the gold standard tree. Arcs closer to the root tend to have more siblings, which ties this result to the previous ones.",
"cite_spans": [],
"ref_spans": [
{
"start": 267,
"end": 275,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Graph Factors",
"sec_num": "4.2"
},
{
"text": "The final graph property we wish to look at is the degree of non-projectivity. The degree of a dependency arc from word w to word u is defined here as the number of words occurring between w and u that are not descendants of w and modify a word that does not occur between w and u (Nivre, 2006) . In the example from Figure 1 , the arc from jedna to Z has a degree of one, and all other arcs have a degree of zero. Figure 6 plots dependency arc precision and recall relative to arc degree in predicted and gold standard dependency graphs. MSTParser is more precise when predicting arcs with high degree and MaltParser vice-versa. Again, this can be explained by the fact that there is a tight correlation between a high degree of non-projectivity, dependency length, distance to root and number of siblings.",
"cite_spans": [
{
"start": 281,
"end": 294,
"text": "(Nivre, 2006)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 317,
"end": 325,
"text": "Figure 1",
"ref_id": "FIGREF0"
},
{
"start": 415,
"end": 423,
"text": "Figure 6",
"ref_id": null
}
],
"eq_spans": [],
"section": "Graph Factors",
"sec_num": "4.2"
},
{
"text": "It is important to relate each system's accuracy to a set of linguistic categories, such as parts of speech and dependency types. Therefore, we have made an attempt to distinguish a few broad categories that are cross-linguistically identifiable, based on the available documentation of the treebanks used in the shared task.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Linguistic Factors",
"sec_num": "4.3"
},
{
"text": "For parts of speech, we distinguish verbs (including both main verbs and auxiliaries), nouns (including proper names), pronouns (sometimes also including determiners), adjectives, adverbs, adpositions (prepositions, postpositions), and conjunctions (both coordinating and subordinating). For dependency types, we distinguish a general root category (for labels used on arcs from the artificial root, including either a generic label or the label assigned to predicates of main clauses, which are normally verbs), a subject category, an object category (including both direct and indirect objects), and various categories related to coordination. Figure 7 shows the accuracy of the two parsers for different parts of speech. This figure measures labeled dependency accuracy relative to the part of speech of the modifier word in a dependency relation. We see that MaltParser has slightly better accuracy for nouns and pronouns, while MSTParser does better on all other categories, in particular conjunctions. This pattern is consistent with previous results insofar as verbs and conjunctions are often involved in dependencies closer to the root that span longer distances, while nouns and pronouns are typically attached to verbs and therefore occur lower in the graph, with shorter distances. Empirically, adverbs resemble verbs and conjunctions with respect to root distance but group with nouns and pronouns for dependency length, so the former appears to be more important. In addition, both conjunctions and adverbs tend to have a high number of siblings, making the results consistent with the graph in Figure 5 .",
"cite_spans": [],
"ref_spans": [
{
"start": 646,
"end": 654,
"text": "Figure 7",
"ref_id": "FIGREF4"
},
{
"start": 1609,
"end": 1617,
"text": "Figure 5",
"ref_id": null
}
],
"eq_spans": [],
"section": "Linguistic Factors",
"sec_num": "4.3"
},
{
"text": "Adpositions and especially adjectives constitute a puzzle, having both high average root distance and low average dependency length. Adpositions do tend to have a high number of siblings on average, which could explain MSTParser's performance on that category. However, adjectives on average occur the furthest away from the root, have the shortest dependency length and the fewest siblings. As such, we do not have an explanation for this behaviour.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Linguistic Factors",
"sec_num": "4.3"
},
{
"text": "In the top half of Figure 8 , we consider precision and recall for dependents of the root node (mostly verbal predicates), and for subjects and objects. As already noted, MSTParser has considerably better precision (and slightly better recall) for the root category, but MaltParser has an advantage for the nominal categories, especially subjects. A possible explanation for the latter result, in addition to the lengthbased and graph-based factors invoked before, is that MaltParser integrates labeling into the parsing process, so that previously assigned dependency labels can be used as features, which may be important to disambiguate subjects and objects.",
"cite_spans": [],
"ref_spans": [
{
"start": 19,
"end": 27,
"text": "Figure 8",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Linguistic Factors",
"sec_num": "4.3"
},
{
"text": "Finally, in the bottom half of Figure 8 , we display precision and recall for coordinate structures, divided into different groups depending on the type of analysis adopted in a particular treebank. The category CCH (coordinating conjunction as head) contains conjunctions analyzed as heads of coordinate structures, with a special dependency label that does not describe the function of the coordinate structure in the larger syntactic structure, a type of category found in the so-called Prague style analysis of coordination and used in the data sets for Arabic, Czech, and Slovene. The category CCD (coordinating conjunction as dependent) instead denotes conjunctions that are attached as dependents of one of the conjuncts with a label that only marks them as conjunctions, a type of category found in the data sets for Bulgarian, Danish, German, Portuguese, Swedish and Turkish. The two remaining categories contain conjuncts that are assigned a dependency label that only marks them as conjuncts and that are attached either to the conjunction (CJCC) or to another conjunct (CJCJ). The former is found in Bulgarian, Danish, and German; the latter only in Portuguese and Swedish. For most of the coordination categories there is little or no difference between the two parsers, but for CCH there is a difference in both precision and recall of almost 20 percentage points to MSTParser's advantage. This can be explained by noting that, while the categories CCD, CJCC, and CJCJ denote relations that are internal to the coordinate structure and therefore tend to be local, the CCH relations hold between the coordinate structure and its head, which is often a relation that spans over a greater distance and is nearer the root of the dependency graph. It is likely that the difference in accuracy for this type of dependency accounts for a large part of the difference in accuracy noted earlier for conjunctions as a part of speech.",
"cite_spans": [],
"ref_spans": [
{
"start": 31,
"end": 39,
"text": "Figure 8",
"ref_id": "FIGREF5"
}
],
"eq_spans": [],
"section": "Linguistic Factors",
"sec_num": "4.3"
},
{
"text": "The experiments from the previous section highlight the fundamental trade-off between global training and exhaustive inference on the one hand and expressive feature representations on the other. Error propagation is an issue for MaltParser, which typi-cally performs worse on long sentences, long dependency arcs and arcs higher in the graphs. But this is offset by the rich feature representation available to these models that result in better decisions for frequently occurring arc types like short dependencies or subjects and objects. The errors for MSTParser are spread a little more evenly. This is expected, as the inference algorithm and feature representation should not prefer one type of arc over another. What has been learned? It was already known that the two systems make different errors through the work of Sagae and Lavie (2006) . However, in that work an arc-based voting scheme was used that took only limited account of the properties of the words connected by a dependency arc (more precisely, the overall accuracy of each parser for the part of speech of the dependent). The analysis in this work not only shows that the errors made by each system are different, but that they are different in a way that can be predicted and quantified. This is an important step in parser development.",
"cite_spans": [
{
"start": 826,
"end": 848,
"text": "Sagae and Lavie (2006)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "To get some upper bounds of the improvement that can be obtained by combining the strengths of each models, we have performed two oracle experiments. Given the output of the two systems, we can envision an oracle that can optimally choose which single parse or combination of sub-parses to predict as a final parse. For the first experiment the oracle is provided with the single best parse from each system, say G = (V, A) and G = (V , A ). The oracle chooses a parse that has the highest number of correctly predicted labeled dependency attachments. In this situation, the oracle accuracy is 84.5%. In the second experiment the oracle chooses the tree that maximizes the number of correctly predicted dependency attachments, subject to the restriction that the tree must only contain arcs from A \u222a A . This can be computed by setting the weight of an arc to 1 if it is in the correct parse and in the set A \u222a A . All other arc weights are set to negative infinity. One can then simply find the tree that has maximal sum of arc weights using directed spanning tree algorithms. This technique is similar to the parser voting methods used by Sagae and Lavie (2006) . In this situation, the oracle accuracy is 86.9%.",
"cite_spans": [
{
"start": 1141,
"end": 1163,
"text": "Sagae and Lavie (2006)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "In both cases we see a clear increase in accuracy: 86.9% and 84.5% relative to 81% for the individual systems. This indicates that there is still potential for improvement, just by combining the two existing models. More interestingly, however, we can use the analysis to get ideas for new models. Below we sketch some possible new directions:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "1. Ensemble systems: The error analysis presented in this paper could be used as inspiration for more refined weighting schemes for ensemble systems of the kind proposed by Sagae and Lavie (2006) , making the weights depend on a range of linguistic and graph-based factors.",
"cite_spans": [
{
"start": 173,
"end": 195,
"text": "Sagae and Lavie (2006)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "2. Hybrid systems: Rather than using an ensemble of several parsers, we may construct a single system integrating the strengths of each parser described here. This could defer to a greedy inference strategy during the early stages of the parse in order to benefit from a rich feature representation, but then default to a global exhaustive model as the likelihood for error propagation increases.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "3. Novel approaches: The two approaches investigated are each based on a particular combination of training and inference methods. We may naturally ask what other combinations may prove fruitful. For example, what about globally trained, greedy, transition-based models? This is essentially what Daum\u00e9 III et al. (2006) provide, in the form of a general search-based structured learning framework that can be directly applied to dependency parsing. The advantage of this method is that the learning can set model parameters relative to errors resulting directly from the search strategy -such as error propagation due to greedy search. When combined with MaltParser's rich feature representation, this could lead to significant improvements in performance.",
"cite_spans": [
{
"start": 296,
"end": 319,
"text": "Daum\u00e9 III et al. (2006)",
"ref_id": "BIBREF2"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "4.4"
},
{
"text": "We have presented a thorough study of the difference in errors made between global exhaustive graph-based parsing systems (MSTParser) and local greedy transition-based parsing systems (Malt-Parser). We have shown that these differences can be quantified and tied to theoretical expectations of each model, which may provide insights leading to better models in the future.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "5"
},
{
"text": "http://mstparser.sourceforge.net",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
},
{
"text": "http://w3.msi.vxu.se/users/nivre/research/MaltParser.html 3 In addition, results were reported for unlabeled attachment score (UAS) (tokens with the correct head) and label accuracy (LA) (tokens with the correct label).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "",
"sec_num": null
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "The PDT: A 3-level annotation scenario",
"authors": [
{
"first": "A",
"middle": [],
"last": "B\u00f6hmov\u00e1",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Haji\u010d",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Haji\u010dov\u00e1",
"suffix": ""
},
{
"first": "B",
"middle": [],
"last": "Hladk\u00e1",
"suffix": ""
}
],
"year": 2003,
"venue": "Treebanks: Building and Using Parsed Corpora",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "A. B\u00f6hmov\u00e1, J. Haji\u010d, E. Haji\u010dov\u00e1, and B. Hladk\u00e1. 2003. The PDT: A 3-level annotation scenario. In A. Abeill\u00e9, editor, Treebanks: Building and Using Parsed Corpora, chapter 7. Kluwer Academic Publish- ers.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "CoNLL-X shared task on multilingual dependency parsing",
"authors": [
{
"first": "S",
"middle": [],
"last": "Buchholz",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Marsi",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc. CoNLL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "S. Buchholz and E. Marsi. 2006. CoNLL-X shared task on multilingual dependency parsing. In Proc. CoNLL.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Search-based structured prediction",
"authors": [
{
"first": "Hal",
"middle": [],
"last": "Daum\u00e9",
"suffix": ""
},
{
"first": "Iii",
"middle": [],
"last": "",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Langford",
"suffix": ""
},
{
"first": "Daniel",
"middle": [],
"last": "Marcu",
"suffix": ""
}
],
"year": 2006,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Hal Daum\u00e9 III, John Langford, and Daniel Marcu. 2006. Search-based structured prediction. In Submission.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Synchronous dependency insertion grammars: A grammar formalism for syntax based statistical MT",
"authors": [
{
"first": "Y",
"middle": [],
"last": "Ding",
"suffix": ""
},
{
"first": "M",
"middle": [],
"last": "Palmer",
"suffix": ""
}
],
"year": 2004,
"venue": "Workshop on Recent Advances in Dependency Grammars (COLING)",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Y. Ding and M. Palmer. 2004. Synchronous dependency insertion grammars: A grammar formalism for syntax based statistical MT. In Workshop on Recent Advances in Dependency Grammars (COLING).",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Word Grammar",
"authors": [
{
"first": "R",
"middle": [],
"last": "Hudson",
"suffix": ""
}
],
"year": 1984,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Hudson. 1984. Word Grammar. Blackwell.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Structural disambiguation with constraint propagation",
"authors": [
{
"first": "H",
"middle": [],
"last": "Maruyama",
"suffix": ""
}
],
"year": 1990,
"venue": "Proc. ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "H. Maruyama. 1990. Structural disambiguation with constraint propagation. In Proc. ACL.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "Online large-margin training of dependency parsers",
"authors": [
{
"first": "R",
"middle": [],
"last": "Mcdonald",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Crammer",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Pereira",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc. ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. McDonald, K. Crammer, and F. Pereira. 2005a. On- line large-margin training of dependency parsers. In Proc. ACL.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Non-projective dependency parsing using spanning tree algorithms",
"authors": [
{
"first": "R",
"middle": [],
"last": "Mcdonald",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Pereira",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Ribarov",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Haji\u010d",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc. HLT/EMNLP",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. McDonald, F. Pereira, K. Ribarov, and J. Haji\u010d. 2005b. Non-projective dependency parsing using spanning tree algorithms. In Proc. HLT/EMNLP.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Multilingual dependency analysis with a two-stage discriminative parser",
"authors": [
{
"first": "R",
"middle": [],
"last": "Mcdonald",
"suffix": ""
},
{
"first": "K",
"middle": [],
"last": "Lerman",
"suffix": ""
},
{
"first": "F",
"middle": [],
"last": "Pereira",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc. CoNLL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. McDonald, K. Lerman, and F. Pereira. 2006. Multi- lingual dependency analysis with a two-stage discrim- inative parser. In Proc. CoNLL.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Dependency Syntax: Theory and Practice",
"authors": [
{
"first": "I",
"middle": [
"A"
],
"last": "",
"suffix": ""
}
],
"year": 1988,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "I.A. Mel'\u010duk. 1988. Dependency Syntax: Theory and Practice. State University of New York Press.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Pseudo-projective dependency parsing",
"authors": [
{
"first": "J",
"middle": [],
"last": "Nivre",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Nilsson",
"suffix": ""
}
],
"year": 2005,
"venue": "Proc. ACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Nivre and J. Nilsson. 2005. Pseudo-projective depen- dency parsing. In Proc. ACL.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Labeled pseudo-projective dependency parsing with support vector machines",
"authors": [
{
"first": "J",
"middle": [],
"last": "Nivre",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Hall",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Nilsson",
"suffix": ""
},
{
"first": "G",
"middle": [],
"last": "Eryigit",
"suffix": ""
},
{
"first": "S",
"middle": [],
"last": "Marinov",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc. CoNLL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Nivre, J. Hall, J. Nilsson, G. Eryigit, and S. Marinov. 2006. Labeled pseudo-projective dependency parsing with support vector machines. In Proc. CoNLL.",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "An efficient algorithm for projective dependency parsing",
"authors": [
{
"first": "J",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2003,
"venue": "Proc. IWPT",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Nivre. 2003. An efficient algorithm for projective de- pendency parsing. In Proc. IWPT.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "Constraints on non-projective dependency parsing",
"authors": [
{
"first": "J",
"middle": [],
"last": "Nivre",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc. EACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "J. Nivre. 2006. Constraints on non-projective depen- dency parsing. In Proc. EACL.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Parser combination by reparsing",
"authors": [
{
"first": "K",
"middle": [],
"last": "Sagae",
"suffix": ""
},
{
"first": "A",
"middle": [],
"last": "Lavie",
"suffix": ""
}
],
"year": 2006,
"venue": "Proc. HLT/NAACL",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "K. Sagae and A. Lavie. 2006. Parser combination by reparsing. In Proc. HLT/NAACL.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "The Meaning of the Sentence in Its Pragmatic Aspects",
"authors": [
{
"first": "P",
"middle": [],
"last": "Sgall",
"suffix": ""
},
{
"first": "E",
"middle": [],
"last": "Haji\u010dov\u00e1",
"suffix": ""
},
{
"first": "J",
"middle": [],
"last": "Panevov\u00e1",
"suffix": ""
}
],
"year": 1986,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "P. Sgall, E. Haji\u010dov\u00e1, and J. Panevov\u00e1. 1986. The Mean- ing of the Sentence in Its Pragmatic Aspects. Reidel.",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Learning syntactic patterns for automatic hypernym discovery",
"authors": [
{
"first": "R",
"middle": [],
"last": "Snow",
"suffix": ""
},
{
"first": "D",
"middle": [],
"last": "Jurafsky",
"suffix": ""
},
{
"first": "A",
"middle": [
"Y"
],
"last": "Ng",
"suffix": ""
}
],
"year": 2004,
"venue": "Proc. NIPS",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "R. Snow, D. Jurafsky, and A. Y. Ng. 2004. Learning syntactic patterns for automatic hypernym discovery. In Proc. NIPS.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "Example dependency graph.",
"num": null,
"type_str": "figure",
"uris": null
},
"FIGREF1": {
"text": "Accuracy relative to sentence length.",
"num": null,
"type_str": "figure",
"uris": null
},
"FIGREF2": {
"text": "Dependency arc precision/recall relative to predicted/gold dependency length.",
"num": null,
"type_str": "figure",
"uris": null
},
"FIGREF3": {
"text": "Dependency arc precision/recall relative to predicted/gold distance to root. Dependency arc precision/recall relative to number of predicted/gold siblings.",
"num": null,
"type_str": "figure",
"uris": null
},
"FIGREF4": {
"text": "Accuracy for different parts of speech.",
"num": null,
"type_str": "figure",
"uris": null
},
"FIGREF5": {
"text": "Precision/recall for different dependency types.",
"num": null,
"type_str": "figure",
"uris": null
}
}
}
} |