{
"paper_id": "U07-1017",
"header": {
"generated_with": "S2ORC 1.0.0",
"date_generated": "2023-01-19T03:08:50.628843Z"
},
"title": "An Empirical Investigation into Grammatically Constrained Contexts in Predicting Distributional Similarity",
"authors": [
{
"first": "Dongqiang",
"middle": [],
"last": "Yang",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of South Australia Po",
"location": {
"postBox": "Box 2100",
"postCode": "5001",
"settlement": "Adelaide",
"country": "South Australia"
}
},
"email": ""
},
{
"first": "|",
"middle": [],
"last": "David",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of South Australia Po",
"location": {
"postBox": "Box 2100",
"postCode": "5001",
"settlement": "Adelaide",
"country": "South Australia"
}
},
"email": ""
},
{
"first": "M",
"middle": [
"W"
],
"last": "Powers",
"suffix": "",
"affiliation": {
"laboratory": "",
"institution": "University of South Australia Po",
"location": {
"postBox": "Box 2100",
"postCode": "5001",
"settlement": "Adelaide",
"country": "South Australia"
}
},
"email": ""
}
],
"year": "",
"venue": null,
"identifiers": {},
"abstract": "The utility of syntactic dependencies in computing distributional similarity has not yet been fully investigated. Most research based on syntactically conditioned co-occurrences simply ignores the salience of grammatical relations and effectively merges syntactic dependencies into one 'context'. Through calculating distributional similarity, we design two experiments to explore and evaluate the four major types of contexts that are conditioned on grammatical relations. The consistent results show that the headmodifier dependency plays an important role in predicting the semantic features of nouns and verbs, in contrast to other dependencies.",
"pdf_parse": {
"paper_id": "U07-1017",
"_pdf_hash": "",
"abstract": [
{
"text": "The utility of syntactic dependencies in computing distributional similarity has not yet been fully investigated. Most research based on syntactically conditioned co-occurrences simply ignores the salience of grammatical relations and effectively merges syntactic dependencies into one 'context'. Through calculating distributional similarity, we design two experiments to explore and evaluate the four major types of contexts that are conditioned on grammatical relations. The consistent results show that the headmodifier dependency plays an important role in predicting the semantic features of nouns and verbs, in contrast to other dependencies.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Abstract",
"sec_num": null
}
],
"body_text": [
{
"text": "The roles of grammatical relations in predicting semantic similarity via distributional similarity have not been fully analysed. Most approaches simply chained these syntactic dependencies into one unified context representation for computing distributional similarity, such as in Word Sense Disambiguation (WSD) (Yarowsky, 1993; Lin, 1997; Resnik, 1997) , word sense induction (Pantel and Lin, 2002) , automatic thesaurus construction (Grefenstette, 1992; Lin, 1998; Curran, 2003) , finding the predominant sense (McCarthy et al., 2004) , etc.",
"cite_spans": [
{
"start": 313,
"end": 329,
"text": "(Yarowsky, 1993;",
"ref_id": "BIBREF20"
},
{
"start": 330,
"end": 340,
"text": "Lin, 1997;",
"ref_id": "BIBREF7"
},
{
"start": 341,
"end": 354,
"text": "Resnik, 1997)",
"ref_id": null
},
{
"start": 378,
"end": 400,
"text": "(Pantel and Lin, 2002)",
"ref_id": "BIBREF12"
},
{
"start": 436,
"end": 456,
"text": "(Grefenstette, 1992;",
"ref_id": "BIBREF2"
},
{
"start": 457,
"end": 467,
"text": "Lin, 1998;",
"ref_id": "BIBREF8"
},
{
"start": 468,
"end": 481,
"text": "Curran, 2003)",
"ref_id": "BIBREF1"
},
{
"start": 514,
"end": 537,
"text": "(McCarthy et al., 2004)",
"ref_id": "BIBREF9"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "It is clear that these approaches weighed each dependency through its frequency statistics, e.g. in the weighted (Grefenstette, 1992) or mutual information based (Lin, 1998) Jaccard's coefficient. Although they proposed to replace the unordered context with the syntactically conditioned one, the linguistic specificity of gram-matical relations in semantics prediction is often overlooked. Except for the extraction of syntactically conditioned contexts, they in fact make no differentiation between grammatical relations, which work analogously as computing distributional similarity with unordered contexts. Without distinguishing the linguistic specificity of grammatical relations, the advantage of using the syntactic constrained context has not yet been fully exploited when yielding statistical semantics from word distributions. Our goal is thereof to study the salience of these syntactic dependencies in regulating statistical semantics, which can improve the acquisition of semantic knowledge in the Vector Space Model (VSM). Pad\u00f3 and Lapata (2007) attempted to investigate the role of each single type of syntactic dependency in their syntactically conditioned VSM. They assumed a direct dependency as an undirected path (with a length of 1) in the graph of syntactic dependencies. In addition to this, they experimented a predefined (oblique) weighting scheme (Keenan and Comrie, 1977) in ranking dependencies, i.e. subject to verb: 5, object to verb: 4, prepositional phrase to verb: 3, etc. The optimal VSM they derived was equipped with inversely weighting dependencies within the path length less than 3, rather than this predefined scheme.",
"cite_spans": [
{
"start": 113,
"end": 133,
"text": "(Grefenstette, 1992)",
"ref_id": "BIBREF2"
},
{
"start": 162,
"end": 173,
"text": "(Lin, 1998)",
"ref_id": "BIBREF8"
},
{
"start": 1038,
"end": 1060,
"text": "Pad\u00f3 and Lapata (2007)",
"ref_id": "BIBREF11"
},
{
"start": 1374,
"end": 1399,
"text": "(Keenan and Comrie, 1977)",
"ref_id": "BIBREF5"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Introduction",
"sec_num": "1"
},
{
"text": "Although they investigated a commonly adopted case of syntactic dependencies with the path length equal to 1, the mapping function for reducing data sparseness and dimensionality of their VSM, e.g. congregating any paths ending with the same word, has obscured distinguishing the dependencies in predicting semantic similarity. Their work has not completely shown to what extent one single type of syntactic dependency can contribute to statistical semantics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "Another similar work was conducted by Plas and Bouma (2005) in enriching Dutch EuroWordNet through clustering distributionally similar words. They investigated the major types of grammatical relationships for nouns in Dutch, and found the predicate-object relation performing best against others such as subject-predicate and adjective-noun. However, the dependencies exposed to verbs have not been explored.",
"cite_spans": [
{
"start": 38,
"end": 59,
"text": "Plas and Bouma (2005)",
"ref_id": "BIBREF14"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "The goal of our work is to explore the utility of the major types of grammatical relations in predicting semantic similarity. Accordingly, distributional similarity is computed directly from each individual syntactic set rather than on a subtractive or additive fusion. To derive German semantic verb classes with distributional grammatical relations, Schulte im Walde (2006) uses additive fusion to merge syntactic and semantic features including pure verb subcategorization frames, prepositional preferences, and selectional preferences one-by-one into a final verb representation (on the condition that the features have been thoroughly studied in verb semantics). Since the distributional features of individual dependency set has not yet been fully explored, we will not go to seeking for the prime word representation through the subtractive or additive fusion, which could be the next phase of our work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "In the following, we first describe how to give rise to word representation using syntactic dependencies. In the two 'gold-standard' datasets, we evaluate each single type of dependency straight through correlating distributional similarity with human judgements. Without the 'gold-standard' data, we then employ automatic thesaurus construction to evaluate these dependencies in lexical acquisition.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Related work",
"sec_num": "2"
},
{
"text": "Word meaning can be represented as a function of co-occurrence frequencies within different contexts, and similar words share similar contexts (Harris, 1985) . In a VSM, the dimensionality of a semantic space can be syntactically conditioned (i.e. syntactic dependencies) or unconditioned (i.e. a bag of words). Different methodologies of distributional similarity under these two context settings, have been systematically surveyed, e.g. for a bag of words (Sahlgren, 2006) and for syntactic dependencies (Curran, 2003; Weeds, 2003) . Moreover, the difference between the two kinds of contexts is also contrasted in a framework (Pad\u00f3 and Lapata, 2007) , with a preliminary conclusion that the syntactically conditioned VSM outperformed the unconditioned one.",
"cite_spans": [
{
"start": 143,
"end": 157,
"text": "(Harris, 1985)",
"ref_id": "BIBREF4"
},
{
"start": 458,
"end": 474,
"text": "(Sahlgren, 2006)",
"ref_id": "BIBREF15"
},
{
"start": 506,
"end": 520,
"text": "(Curran, 2003;",
"ref_id": "BIBREF1"
},
{
"start": 521,
"end": 533,
"text": "Weeds, 2003)",
"ref_id": "BIBREF18"
},
{
"start": 629,
"end": 652,
"text": "(Pad\u00f3 and Lapata, 2007)",
"ref_id": "BIBREF11"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Syntactic dependency",
"sec_num": "3"
},
{
"text": "Instead of arguing the states and advantages of these context representations in applications, we focus on the roles of major types of grammatical relations in the syntactic constrained VSM. The major types of these relations mainly embodied either in head-modifier, i.e. adjective to noun (AN) and adverb or the nominal head in a prepositional phrase to verb (RV) or in grammatical roles of verb-object (VO) and subject-verb (SV). The premises mainly rely on the following: (1) the meaning of a noun could depend on its modifiers such as adjectives, nouns, and the nominal head in a prepositional phrase as well as the grammatical role of a noun in a sentence as a subject or object; and (2) the meaning of a verb could be determined by its direct object, subject, or the head of a prepositional phrase.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Syntactic dependency",
"sec_num": "3"
},
{
"text": "To capture these relations accurately we employ a widely used and freely available parser based on link grammar (Sleator and Temperley, 1991) .",
"cite_spans": [
{
"start": 112,
"end": 141,
"text": "(Sleator and Temperley, 1991)",
"ref_id": "BIBREF17"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "In Link Grammar each word is equipped with 'left-pointing' and/or 'right-pointing' connectors. Based on the crafted rules of the connectors in validating word usages, a link between two words can be formed in reflecting a dependency relation. Apart from these word rules, 'crossinglinks' and 'connectivity' are the two global rules working on interlinks, which respectively restrict a link from starting or ending in the middle of pre-existed links and force all the words of a sentence to be traced along links.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "There are in total 107 major link types in the Link Grammar parser (ver. 4.1), whereas there are also various sub-link types that specify special cases of dependencies.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "Using this parser, we extracted and classified the following link types into the four main types of dependencies:",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "\u2022 RV 1. E: verbs and their adverb pre-modifiers 2. EE: adverbs and their adverb premodifiers 3. MV: verbs and their post-modifiers such as adverbs, prepositional phrase Note that except for RV, we define the AN, SV, and VO dependencies almost identically to shallow parsers (Grefenstette, 1992; Curran, 2003) , or a full parser of MINIPAR (Lin, 1998) but we retrieve them instead through the Link Grammar parser.",
"cite_spans": [
{
"start": 274,
"end": 294,
"text": "(Grefenstette, 1992;",
"ref_id": "BIBREF2"
},
{
"start": 295,
"end": 308,
"text": "Curran, 2003)",
"ref_id": "BIBREF1"
},
{
"start": 339,
"end": 350,
"text": "(Lin, 1998)",
"ref_id": "BIBREF8"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "Given different methodologies to implementing parsing, it is hardly fair to justify a syntactic parser. Molla and Hutchinson (2003) compared the Link Grammar (LG) parser and the Conexor Functional Dependency Grammar (CFDG) parser with respect to intrinsic and extrinsic evaluations. In the intrinsic evaluation the performance of the two parsers was compared and measured in terms of the precision and recall of extracting four types of dependencies, including subject-verb, verb-object, head-modifier, and head-complement. In the extrinsic evaluation a question-answering application was used to contrast the two parsers. Although the LG parser is inferior to the CFDG parser in locating the four types of dependencies, they are not significantly different when applied in question answering. Given that our main task is to study the difference of the syntactic dependencies: RV, AN, SV, and VO, acquired with the same LG parser, in predicting semantics, it is appropriate to use the LG parser to extract these dependencies.",
"cite_spans": [
{
"start": 104,
"end": 131,
"text": "Molla and Hutchinson (2003)",
"ref_id": "BIBREF10"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Classification and parsing",
"sec_num": "3.1"
},
{
"text": "After parsing the 100 million-word British National Corpus (BNC) and filtering out noncontent words and morphology analysis, we separately extracted and clustered the relationships to construct 4 parallel raw matrixes Xs (cooccurrence sets) in terms of the 4 syntactic dependencies above (hereafter the syntactically conditioned co-occurrences, denoted as R X : RV X , AN X , SV X , and VO X ). The row vectors of R X denoted respectively Rv X , An X , Sv X , and Vo X , whereas the column vectors of R X are denoted as rV X , aN X , sV X , and vO X respectively.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Matrix construction",
"sec_num": "3.2"
},
{
"text": "The four matrices treat contexts with semantic contents in the frame of the syntactic dependencies. These additional constraints yield rarer events than word co-occurrences in a bag of words. The four syntactic matrices are extremely sparse with nulls in over 95% of the cells. However, they impose more accurate or meaningful (grammatical) relationships between words providing the parser is reasonable accurate. Instead of eliminating the triples with lower frequencies, we kept all co-occurrences to avoid worsening data sparseness.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Matrix construction",
"sec_num": "3.2"
},
{
"text": "We first substituted the frequency of cell X i,jfreq(X i,j ) with its information form using log(freq(X i,j )+1) to retain sparsity (0 0). It can produce \"a kind of space effect\" (Landauer and Dumais, 1997 ) that can lessen the gradient of the frequency-rank curve in Zipf's law (1965) , reducing the gap between rarer events and frequent ones.",
"cite_spans": [
{
"start": 179,
"end": 205,
"text": "(Landauer and Dumais, 1997",
"ref_id": "BIBREF6"
},
{
"start": 268,
"end": 285,
"text": "Zipf's law (1965)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "We then applied Single Value Decomposition (SVD) to smooth the matrices and reduce their dimensionalities to 250, commonly adopted in NLP or LSA (on the word by document matrix). We do not normalize the documents by document entropy as we are not dealing with whole documents but small contexts.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "In effect, we map a word-by-word matrix into two word-by-concept (uncorrelated component) matrices after SVD. Consider SV X a m by n matrix representing subject-verb dependencies between m subjects and n verbs. The SV relation can be demonstrated by either using the rows (Sv X or {X i,* }) of SV X corresponding to nouns conditioned as subjects of verbs in sentences, or the columns (sV X or {X *,j }) to verbs conditioned by nouns as subjects. The cell X i,j shows the frequency of the ith subject with the jth verb. The ith row X i,* of SV X is a profile of the ith subject in terms of its all verbs and the jth column X *,j of SV X profiles the jth verb versus its subjects. We represent the SV relation respectively using the rows (Sv X or {Xi, * }) of SV X corresponding to nouns conditioned as subjects of verbs in sentences (m by 250 after SVD), and the columns (sV X or {X * ,j}) to verbs conditioned by nouns as subjects (n by 250 after SVD).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "With respect to the mutual effect of the dependencies on words, the distributional features of nouns mainly focus on aN X , An X , vO X , and Sv X , whereas the verbs focus on Vo X , rV X , and sV X . Distributional similarity can be evaluated on these dependency sets.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "Note that we also concatenated these dependency sets into one united set (denoted as All X ) respectively for nouns and verbs, which indicates the common case of combining all dependencies in computing distributional similarity. All X also functioned as a baseline in the following evaluations.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "We consistently employed the cosine similarity of word vectors as used in LSA and commonly adopted in assessing distributional similarity. Our contribution is to explore and contrast the semantic features of different syntactic dependencies consistently with one similarity method-the cosine, rather than to compare different distributional similarity measures with one united syntactic structure that combines all the dependencies together. Although taking into account more similarity measures in the evaluations can solidify conclusions, this would take us beyond the scope of the work.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Dimensionality reduction",
"sec_num": "3.3"
},
{
"text": "Rubenstein and Goodenough , in an experiment of investigating distributional similarity, constructed an evaluation dataset with word pairs and their semantic similarity scores. They hired 51 college undergraduates divided into two groups to measure 65 pairs of nouns with the similarity score ranging from 0 to 4. The higher the similarity number, the more similar the nouns were in their meanings. Many researchers (cf. Budantisky and Hirst (2006) and Pedersen et al. (2004) for some popular taxonomy similarity methods) validated semantic similarity methods using the human group similarity judgments on the standard dataset of the 65 noun-pairs.",
"cite_spans": [
{
"start": 421,
"end": 448,
"text": "Budantisky and Hirst (2006)",
"ref_id": "BIBREF0"
},
{
"start": 453,
"end": 475,
"text": "Pedersen et al. (2004)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Human similarity judgement",
"sec_num": "4"
},
{
"text": "Another source available is provided by Yang and Powers (2006) in their verb similarity work, where 130 pairs of verbs were scored by 6 subjects with a Likert scale from 0 to 4 (from non-similar to nearly synonymous). This dataset was acquired through the analogous instruction in the 65 noun-pairs similarity judgement.",
"cite_spans": [
{
"start": 40,
"end": 62,
"text": "Yang and Powers (2006)",
"ref_id": "BIBREF19"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Human similarity judgement",
"sec_num": "4"
},
{
"text": "Instead of answering if two words are synonymous or not, we compare to what extent distributional similarity derived from each dependency set correlate well with the human judgements on these 65 (noun) and 130 (verb) pairs. Finkelstein et al. (2002) created another dataset-a large volume of 353 word pairs. But these pairs are not strictly rated with semantic similarity rather than with word association strength, for example there are many word associations such as Maradonna-football and FBI-investigation. Therefore, we did not attempt to include it to evaluate distributional similarity.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Human similarity judgement",
"sec_num": "4"
},
{
"text": "In this task, we tested distributional similarity (the cosine) respectively on the 65 noun-pairs with four sub-syntactic sets: aN X , An X , vO X , and Sv X where nouns are mainly represented, as well as on the 130 verb pairs with the three subsyntactic sets: Vo X , rV X , and sV X where verbs as row vectors can be represented with their objects, modifiers, and subjects. Table 1 : The value/rank correlation (r/\u03c1) on the syntactically conditioned dependencies After calculating the cosine similarity of two word vectors in each subset, we then computed Pearson's correlation (r) and Spearman's correlation (\u03c1) between human average scores and distributional similarity (the cosine) scores. The results in different sub-synsets are shown in Table 1. Note that in Table 1 we also listed the taxonomy-based similarity measures proposed by Yang and Powers (2005; , shortened for Sim WN that is based on a lexical knowledge base (WordNet) and can be referred in the next section. Sim WN can be taken as the upper bands for the 2 tasks, because Yang and Powers results on both '130 verbs' and '65 nouns' were competitive against others popular methods coded in the WordNet similarity package (Pedersen et al., 2004) .",
"cite_spans": [
{
"start": 839,
"end": 861,
"text": "Yang and Powers (2005;",
"ref_id": null
},
{
"start": 1189,
"end": 1212,
"text": "(Pedersen et al., 2004)",
"ref_id": "BIBREF13"
}
],
"ref_spans": [
{
"start": 374,
"end": 381,
"text": "Table 1",
"ref_id": null
},
{
"start": 765,
"end": 772,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Results",
"sec_num": "4.1"
},
{
"text": "aN X An X vO X Sv X All X Sim",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "4.1"
},
{
"text": "Note that unless otherwise specified we ran the paired T-test at the significance level of \u03b1 = 0.05 in the following sections. As to the '65' dataset in Table 1 -(a), distributional similarity in aN X with correlations over 72% predicted more accurate semantic similarity than the other three subsets: An X , Sv X , and vO X . Nonetheless, aN X only significantly outperformed Sv X and vO X rather than An X . Note that An X was significantly better in correlating with human judgments than vO X but not Sv X . Both Sv X and vO X performed on a par without significant difference. The multiple linear regression shows that the combined model with aN X , An X , vO X , and Sv X (r = 0.74) was significantly better than guessing the mean (F = 15.394, p < 0.001), where An X , vO X , and Sv X contributed little to the linear combination (p > 0.05) and aN X was the only significant contributor to the model (p < 0.001).",
"cite_spans": [],
"ref_spans": [
{
"start": 153,
"end": 160,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion on the noun task",
"sec_num": "4.2"
},
{
"text": "In contrast to the upper band of Sim WN , an approach to taxonomic similarity, distributional similarity on aN X , An X , vO X , and Sv X both significantly underperformed Sim WN in correlating with human judgements.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion on the noun task",
"sec_num": "4.2"
},
{
"text": "Table 1-(a) also contains the correlation of the baseline All X with human ratings in this task (r = 0.62). Without any fusion, distributional similarity on aN X correlated better with human judgments than All X , whereas An X performed nearly identically with All X .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion on the noun task",
"sec_num": "4.2"
},
{
"text": "As shown in Table 1-(b), the cosine similarity in rV X with the correlation of about 60% predicted relatively more accurate semantic similarity than other two subsets: sV X and Vo X , but the differences in their correlations were not significant. With the multiple linear regression on Vo X , rV X , and sV X , we observed that 38.4% of variations in human judgement was accounted for in the combined model (F = 26.151, p < 0.001) that strongly correlated with the observed values (r = 0.62). Both rV X and Vo X made a significant contribution in the model with the exception of sV X .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion on the verb task",
"sec_num": "4.3"
},
{
"text": "As for the taxonomic similarity in Table 1 -(b), distributional similarities on Vo X , rV X , and sV X were significantly inferior to Sim WN in terms of correlations with human judgements on the 130 pairs.",
"cite_spans": [],
"ref_spans": [
{
"start": 35,
"end": 42,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion on the verb task",
"sec_num": "4.3"
},
{
"text": "With respect to the united dependency set, consisting of Vo X , rV X , and sV X , only rV X performed competitively against the baseline All X .",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion on the verb task",
"sec_num": "4.3"
},
{
"text": "Due to the hypothesis of distributional representations, distributional similarity of words should correlate with the common features they share (Harris, 1985) . We defined and collected the Intersection Attribute Frequency (IAF), which indicates on average how many common attributes any two words share in each dependency set R X . For the 65 noun pairs, IAF on aN X (65.2) was larger than it on An X (49.2), vO X (26.6), and Sv X (20.9), which corresponded well to their orders of the correlations in Table 1-(a). For the 130 verb pairs, IAF on rV X (168.9) was greater than it on Vo X (139.1) and sV X (105.1), which tallied with the relatively higher correlation on rV X (r = 0.59) than on Vo X (r = 0.49) and sV X (r = 0.41) in Table 1 -(b) . This is in accordance with the intuition that the more features words share, the more similar they are, which could account for the difference between the dependencies in predicting semantic features.",
"cite_spans": [
{
"start": 145,
"end": 159,
"text": "(Harris, 1985)",
"ref_id": "BIBREF4"
},
{
"start": 742,
"end": 746,
"text": "-(b)",
"ref_id": null
}
],
"ref_spans": [
{
"start": 734,
"end": 741,
"text": "Table 1",
"ref_id": null
}
],
"eq_spans": [],
"section": "Frequency bias",
"sec_num": "4.4"
},
{
"text": "Instead of comparing distributional similarity with the 'gold-standard' of human similarity judgement, one of the application-style evaluations on distributional similarity is to automatically produce a thesaurus entry for each target word, through which the accuracy of synonyms or near-synonyms captured can indirectly measure the capabilities of the syntactic dependencies in predicting lexical semantics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Thesaurus Construction",
"sec_num": "5"
},
{
"text": "The usual way of creating an automatic thesaurus is to extract the top n words in the similar word list of each target as the entries of its thesaurus, after calculating and ranking the distributional similarity between the target and all of the other words. The accuracy and coverage of thesauri inevitably depend on the size and domains of the corpora used, as well as the measures of computing distributional similarity.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Thesaurus Construction",
"sec_num": "5"
},
{
"text": "Given the same distributional similarity (cosine) across the dependency sets, the results of thesaurus construction can test semantic constraints of grammatical relations. Instead of a normal thesaurus with a full coverage of PoS tags, we only compile the thesaurus entries of nouns and verbs that account for the major part of published thesauri.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Thesaurus Construction",
"sec_num": "5"
},
{
"text": "(a) The similar words to sentence (as a noun) We select 100 nouns and 100 verbs with term frequencies of around 10,000 times in BNC. Highly frequent words are likely to be functional words and the less frequent words may not appear in the semantic sets. In fact, the average frequencies of the nouns in An X , aN X , Sv X , and vO X are respectively about 3400, 5600, 1200, and 1700, and the verbs in rV X , Vo X , and sV X 3000, 3300, and 2000, as we only extracted syntactic dependencies from BNC.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Candidate words",
"sec_num": "5.1"
},
{
"text": "For a target word in each sub-syntactic set, we produced and ranked the top 20 words as candidates for the automatic thesaurus after computing distributional similarity of the target with all other words in each sub-syntactic set. The population of the nouns or the verbs consists of 2000 words. In Table 2 , we exemplify the top 20 similar words of sentence (as a noun) and attack (as a verb).",
"cite_spans": [],
"ref_spans": [
{
"start": 299,
"end": 306,
"text": "Table 2",
"ref_id": "TABREF2"
}
],
"eq_spans": [],
"section": "Candidate words",
"sec_num": "5.1"
},
{
"text": "It is not a trivial task to evaluate distributional thesauri in the absence of a benchmark set. After constructing a 'gold standard' dataset consisting of Roget's Thesaurus (1911) , Macquarie's Thesaurus, and Webster's 7th dictionary, Grefenstette (1993) evaluated his automatic thesaurus extracted from Grolier's Encyclopaedia using distributional similarity on syntactic dependencies. If two words were located under the same topic in Roget or Macquarie, or shared two or more terms in their definitions in the dictionary, they were counted as a successful hit for synonyms or semantic-relatedness.",
"cite_spans": [
{
"start": 155,
"end": 179,
"text": "Roget's Thesaurus (1911)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5.2"
},
{
"text": "To improve the coverage of the 'gold standard' dataset in the experiment, Curran (2003) incorporated more thesauri: Roget Thesaurus (both the free version provided by Project Gutenberg (1911) and the modern version of Roget's II), Moby Thesaurus, The New Oxford Thesaurus of English, and The Macquarie Encyclopedic Thesaurus.",
"cite_spans": [
{
"start": 74,
"end": 87,
"text": "Curran (2003)",
"ref_id": "BIBREF1"
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5.2"
},
{
"text": "Instead of simply matching with the 'gold standard' thesauri, Lin (1998) proposed to compare the structures of his automatic thesaurus to WordNet and Roget through his taxonomic similarity approach, i.e. taking into account the order of the similar words produced from distributional similarity. Inspired by Lin's work (1998) , we also defined two different similarity measures to compare the automatic thesaurus with the 'gold standard', i.e. Sim WN for WordNet and Sim RT for Roget. Instead of recording the similarity scores produced in Sim WN and Sim RT , we counted the number of similar words within similarity thresholds.",
"cite_spans": [
{
"start": 62,
"end": 72,
"text": "Lin (1998)",
"ref_id": "BIBREF8"
},
{
"start": 308,
"end": 325,
"text": "Lin's work (1998)",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5.2"
},
{
"text": "\u2022 Sim WN : There are numerous noun similarity methods in the WordNet similarity package of Pedersen et al. (2004) . However, since the similarity method proposed by Yang and Powers (2005; was competitive and also worked on the 130 verb pairs unlike other algorithms, we employed their algorithm in the evaluation. Note that their meth- ods were in fact based on edge-counting in the taxonomy of WordNet. In the task, we set up a shorter searching depth limit \u03b3 = 4 for nouns to identify words that are more similar, and \u03b3 = 2 for verbs. If two distributionally similar words are syn/antonym or connected with each other in the taxonomy with the shortest path length less than the depth limit, we counted them as a successful hit, i.e. semantic relatedness. \u2022 Sim RT : Roget's Thesaurus divides its hierarchy top class to the bottom topic, and stores topic-related words under one of 1000 topics. We counted it a hit if two words are situated under the same topic or the higher level of the same section, i.e. the distance between two words was no more than 2 levels.",
"cite_spans": [
{
"start": 91,
"end": 113,
"text": "Pedersen et al. (2004)",
"ref_id": "BIBREF13"
},
{
"start": 165,
"end": 187,
"text": "Yang and Powers (2005;",
"ref_id": null
}
],
"ref_spans": [],
"eq_spans": [],
"section": "Evaluation",
"sec_num": "5.2"
},
{
"text": "WordNet SA D1 D2 D3 D4 \u2211",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Results",
"sec_num": "5.3"
},
{
"text": "Noun aN X 2.8 7.5 10.0 8.2 5.3 33.7 27.5 46.7 An X 1.5 5.5 9.6 8.6 5.3 30.6 22.3 43.4 vO X 1.6 4.5 5.9 5.1 4.1 21.2 17.9 33.0 Sv X 1.1 2.9 4.8 5.0 3.7 17.4 14.1 29.2 All X 3.0 7.3 11.2 8.7 5.6 36.1 30.1 46.9 Verb rV X 5.3 16.5 13.7 35.5 31.1 46.2 Vo X 4.1 13.8 13.3 31.1 26.9 43.4 sV X 2.7 9.6 12.0 24.3 24.1 37.7 All X 4.0 20.0 12.8 36.7 30.2 47.9 Table 3 : The evaluation results of noun and verb thesauri.",
"cite_spans": [],
"ref_spans": [
{
"start": 349,
"end": 356,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Roget Total",
"sec_num": null
},
{
"text": "The results of our automatic thesauri for the nouns and verbs in the sub-syntactic sets are listed in Table 3 . For Sim WN , SA denotes the accuracy on the syn/antonyms of the targets, and DI the accuracy on the words with exactly I link distance to targets (for nouns I \u2264 \u03b3 = 4; for verbs I \u2264 \u03b3 = 2); \u2211 denotes the overall accuracy. For Sim RT , Roget indicates the overall accuracy in Roget, and Total the overall accuracy in both WordNet and Roget.",
"cite_spans": [],
"ref_spans": [
{
"start": 102,
"end": 109,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Roget Total",
"sec_num": null
},
{
"text": "In Table 3 both the noun thesaurus from aN X and the verb thesaurus from rV X achieved the highest overall accuracy in WordNet, Roget, and Total. The paired-sample T-test on the accuracy of each target in each sub-syntactic set showed that (1) distributional similarity extracted significantly more similar nouns from aN X than other three dependency sets: An X , vO X , and Sv X , and from An X than the other two sets: vO X and Sv X ;",
"cite_spans": [],
"ref_spans": [
{
"start": 3,
"end": 10,
"text": "Table 3",
"ref_id": null
}
],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.4"
},
{
"text": "(2) there was no significant difference between rV X and Vo X in retrieving real similar verbs through distributional similarity, but both of them were significantly better than Sv X . The baseline All X , incorporating more grammatical relations into one representation, i.e. aN X , An X , vO X , and Sv X for nouns and Vo X , rV X , and sV X for verbs, retrieved more synonyms or near-synonyms in its automatic thesauri than other single dependency set. The advantage of All X against others is not a surprise given the syntactic dependencies it combined. However, All X vs. aN X and rV X shows no significant discrepancy on accuracy, which also implied the strength of the head-modifier relations on dominating lexical semantics.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.4"
},
{
"text": "We further varied the threshold from 20 to 50 words increasingly with 10 words to study the effect of the size of term clusters on accuracy. We found that the results were similar, and the drop of the overall accuracy of nouns and verbs was on average 4% and not significant (p < 0.05).",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.4"
},
{
"text": "These homogeneous results in retrieving semantically similar or related words, together with those in judging semantic similarity, indicated that the head-modifier relations strongly correlate with semantic properties for nouns and verbs.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Discussion",
"sec_num": "5.4"
},
{
"text": "As indicated in the previous evaluation, we also collected the IAF statistics of 2,000 noun and 2,000 verb pairs in these dependency sets, which can signify to what extent two words share common distributional structures in each dependency set. The highest IAF 135.4 in aN X (respectively 92.4 in An X , 35.9 in vO X , and 28.1 in Sv X ) and 87.6 in rV X (53.1 and 45.8 in Vo X and sV X ) corresponds to the highest accuracy of each dependency set in yielding automatic thesaurus construction. These results were consistent with those in the relatively small data sets of 65 noun-pairs and 130 verb-pairs from the previous section, where IAF is proportional to the correlations of distributional similarity on each type of grammatical relations with human similarity judgements.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Frequency bias",
"sec_num": "5.5"
},
{
"text": "Through human similarity judgements and automatic thesaurus construction, we study the major types of syntactic dependencies in expressing semantic salience. The consistent results show that semantic features of nouns and verbs are most strongly characterised by the head-modifier relations. The distinctive linguistic features of these syntactic dependencies provide an empirical basis for how to better model word meanings. Our future work would be to fuse these features in the distributional representation of words, and tailor them for specific applications.",
"cite_spans": [],
"ref_spans": [],
"eq_spans": [],
"section": "Conclusion",
"sec_num": "6"
}
],
"back_matter": [],
"bib_entries": {
"BIBREF0": {
"ref_id": "b0",
"title": "Evaluating WordNet-based Measures of Lexical Semantic Relatedness",
"authors": [
{
"first": "Alexander",
"middle": [],
"last": "Budantisky",
"suffix": ""
},
{
"first": "Graeme",
"middle": [],
"last": "Hirst",
"suffix": ""
}
],
"year": 2006,
"venue": "Computational Linguistics",
"volume": "32",
"issue": "1",
"pages": "13--47",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Budantisky, Alexander and Graeme Hirst (2006). Evaluating WordNet-based Measures of Lexical Semantic Relatedness. Computational Linguistics 32(1): 13-47.",
"links": null
},
"BIBREF1": {
"ref_id": "b1",
"title": "Placing Search in Context: The Concept Revisited",
"authors": [
{
"first": "James",
"middle": [
"R"
],
"last": "Curran",
"suffix": ""
},
{
"first": "Evgeniy",
"middle": [],
"last": "Lev",
"suffix": ""
},
{
"first": "Yossi",
"middle": [],
"last": "Gabrilovich",
"suffix": ""
},
{
"first": "Ehud",
"middle": [],
"last": "Matias",
"suffix": ""
},
{
"first": "Zach",
"middle": [],
"last": "Rivlin",
"suffix": ""
},
{
"first": "Gadi",
"middle": [],
"last": "Solan",
"suffix": ""
},
{
"first": "Eytan",
"middle": [],
"last": "Wolfman",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Ruppin",
"suffix": ""
}
],
"year": 2002,
"venue": "ACM Transactions on Information Systems",
"volume": "20",
"issue": "",
"pages": "116--131",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Curran, James R. (2003). From Distributional to Se- mantic Similarity. Ph.D thesis Finkelstein, Lev, Evgeniy Gabrilovich, Yossi Matias, Ehud Rivlin, Zach Solan, Gadi Wolfman and Eytan Ruppin (2002). Placing Search in Context: The Concept Revisited. ACM Transactions on Informa- tion Systems 20: 116-131.",
"links": null
},
"BIBREF2": {
"ref_id": "b2",
"title": "Sextant: Exploring Unexplored Contexts for Semantic Extraction from Syntactic Analysis",
"authors": [
{
"first": "Gregory",
"middle": [],
"last": "Grefenstette",
"suffix": ""
}
],
"year": 1992,
"venue": "the 30th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "324--326",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Grefenstette, Gregory (1992). Sextant: Exploring Un- explored Contexts for Semantic Extraction from Syntactic Analysis. In the 30th Annual Meeting of the Association for Computational Linguistics, 324-326. Newark, Delaware.",
"links": null
},
"BIBREF3": {
"ref_id": "b3",
"title": "Evaluation Techniques for Automatic Semantic Extraction: Comparing Syntactic and Window Based Approaches",
"authors": [
{
"first": "Gregory",
"middle": [],
"last": "Grefenstette",
"suffix": ""
}
],
"year": 1993,
"venue": "the Workshop on Acquisition of Lexical Knowledge from Text",
"volume": "",
"issue": "",
"pages": "143--153",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Grefenstette, Gregory (1993). Evaluation Techniques for Automatic Semantic Extraction: Comparing Syntactic and Window Based Approaches. In the Workshop on Acquisition of Lexical Knowledge from Text, 143-153.",
"links": null
},
"BIBREF4": {
"ref_id": "b4",
"title": "Distributional Structure. In The Philosophy of Linguistics",
"authors": [
{
"first": "Zellig",
"middle": [],
"last": "Harris",
"suffix": ""
}
],
"year": 1985,
"venue": "",
"volume": "",
"issue": "",
"pages": "26--47",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Harris, Zellig (1985). Distributional Structure. In The Philosophy of Linguistics J. J. Katz, (ed). New York, Oxford University Press: 26-47.",
"links": null
},
"BIBREF5": {
"ref_id": "b5",
"title": "Noun phrase accessibility and universal grammar",
"authors": [
{
"first": "Edward",
"middle": [],
"last": "Keenan",
"suffix": ""
},
{
"first": "Bernard",
"middle": [],
"last": "Comrie",
"suffix": ""
}
],
"year": 1977,
"venue": "Linguistic Inquiry",
"volume": "8",
"issue": "",
"pages": "62--100",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Keenan, Edward and Bernard Comrie (1977). Noun phrase accessibility and universal grammar. Lin- guistic Inquiry 8: 62-100.",
"links": null
},
"BIBREF6": {
"ref_id": "b6",
"title": "A Solution to Plato's Problem: the Latent Semantic Analysis Theory of Acquisition, Induction, and Representation of Knowledge",
"authors": [
{
"first": "Thomas",
"middle": [
"K"
],
"last": "Landauer",
"suffix": ""
},
{
"first": "Susan",
"middle": [
"T"
],
"last": "Dumais",
"suffix": ""
}
],
"year": 1997,
"venue": "Psychological Review",
"volume": "104",
"issue": "",
"pages": "211--240",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Landauer, Thomas K. and Susan T. Dumais (1997). A Solution to Plato's Problem: the Latent Semantic Analysis Theory of Acquisition, Induction, and Representation of Knowledge. Psychological Re- view 104: 211-240.",
"links": null
},
"BIBREF7": {
"ref_id": "b7",
"title": "Using Syntactic Dependency as a Local Context to Resolve Word Sense Ambiguity",
"authors": [
{
"first": "Dekang",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 1997,
"venue": "the 35th Annual Meeting of the Association for Computational Linguistics",
"volume": "",
"issue": "",
"pages": "64--71",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lin, Dekang (1997). Using Syntactic Dependency as a Local Context to Resolve Word Sense Ambiguity. In the 35th Annual Meeting of the Association for Computational Linguistics, 64-71. Madrid, Spain.",
"links": null
},
"BIBREF8": {
"ref_id": "b8",
"title": "Automatic Retrieval and Clustering of Similar Words",
"authors": [
{
"first": "Dekang",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 1998,
"venue": "the 17th International Conference on Computational Linguistics",
"volume": "",
"issue": "",
"pages": "768--774",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Lin, Dekang (1998). Automatic Retrieval and Cluster- ing of Similar Words. In the 17th International Conference on Computational Linguistics, 768- 774. Montreal, Quebec, Canada.",
"links": null
},
"BIBREF9": {
"ref_id": "b9",
"title": "Finding Predominant Senses in Untagged Text",
"authors": [
{
"first": "Diana",
"middle": [],
"last": "Mccarthy",
"suffix": ""
},
{
"first": "Rob",
"middle": [],
"last": "Koeling",
"suffix": ""
},
{
"first": "Julie",
"middle": [],
"last": "Weeds",
"suffix": ""
},
{
"first": "John",
"middle": [],
"last": "Carroll",
"suffix": ""
}
],
"year": 2004,
"venue": "the 42nd Annual Meeting of the Association for Computational Linguistics (ACL-04)",
"volume": "",
"issue": "",
"pages": "267--287",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "McCarthy, Diana, Rob Koeling, Julie Weeds and John Carroll (2004). Finding Predominant Senses in Untagged Text. In the 42nd Annual Meeting of the Association for Computational Linguistics (ACL- 04), 267-287. Barcelona, Spain.",
"links": null
},
"BIBREF10": {
"ref_id": "b10",
"title": "Intrinsic versus Extrinsic Evaluations of Parsing Systems",
"authors": [
{
"first": "Diego",
"middle": [],
"last": "Molla",
"suffix": ""
},
{
"first": "Ben",
"middle": [],
"last": "Hutchinson",
"suffix": ""
}
],
"year": 2003,
"venue": "European Association for Computational Linguistics(EACL), workshop on Evaluation Initiatives in Natural Language Processing",
"volume": "",
"issue": "",
"pages": "43--50",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Molla, Diego and Ben Hutchinson (2003). Intrinsic versus Extrinsic Evaluations of Parsing Systems. In European Association for Computational Linguis- tics(EACL), workshop on Evaluation Initiatives in Natural Language Processing, 43-50. Budapest, Hungary.",
"links": null
},
"BIBREF11": {
"ref_id": "b11",
"title": "Dependency-based construction of semantic space models",
"authors": [
{
"first": "Sebastian",
"middle": [],
"last": "Pad\u00f3",
"suffix": ""
},
{
"first": "Mirella",
"middle": [],
"last": "Lapata",
"suffix": ""
}
],
"year": 2007,
"venue": "Computational Linguistics",
"volume": "33",
"issue": "2",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pad\u00f3, Sebastian and Mirella Lapata (2007). Depend- ency-based construction of semantic space models. To appear in Computational Linguistics 33(2).",
"links": null
},
"BIBREF12": {
"ref_id": "b12",
"title": "Discovering Word Senses from Text",
"authors": [
{
"first": "Patrick",
"middle": [],
"last": "Pantel",
"suffix": ""
},
{
"first": "Dekang",
"middle": [],
"last": "Lin",
"suffix": ""
}
],
"year": 2002,
"venue": "the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining",
"volume": "",
"issue": "",
"pages": "613--619",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pantel, Patrick and Dekang Lin (2002). Discovering Word Senses from Text. In the Eighth ACM SIGKDD International Conference on Knowledge Discovery and Data Mining, 613-619. New York, NY, USA.",
"links": null
},
"BIBREF13": {
"ref_id": "b13",
"title": "WordNet::Similarity -Measuring the Relatedness of Concepts",
"authors": [
{
"first": "Ted",
"middle": [],
"last": "Pedersen",
"suffix": ""
},
{
"first": "Siddharth",
"middle": [],
"last": "Patwardhan",
"suffix": ""
},
{
"first": "Jason",
"middle": [],
"last": "Michelizzi",
"suffix": ""
}
],
"year": 2004,
"venue": "the Nineteenth National Conference on Artificial Intelligence (AAAI-04)",
"volume": "",
"issue": "",
"pages": "1024--1025",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Pedersen, Ted, Siddharth Patwardhan and Jason Michelizzi (2004). WordNet::Similarity -Measur- ing the Relatedness of Concepts. In the Nineteenth National Conference on Artificial Intelligence (AAAI-04), 1024-1025. San Jose, CA.",
"links": null
},
"BIBREF14": {
"ref_id": "b14",
"title": "Contexts for Finding Semantically Similar Words",
"authors": [
{
"first": "Lonneke",
"middle": [],
"last": "Plas",
"suffix": ""
},
{
"first": "Gosse",
"middle": [],
"last": "Van Der",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Bouma",
"suffix": ""
}
],
"year": 1997,
"venue": "ACL Siglex Workshop on Tagging Text with Lexical Semantics, Why, What and How",
"volume": "",
"issue": "",
"pages": "52--57",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Plas, Lonneke van der and Gosse Bouma (2005). Contexts for Finding Semantically Similar Words. In the 20th International Conference on Computa- tional Linguistics, 173-186. Geneva, Switzerland. Resnik, Philip (1997). Selectional Preference and Sense Disambiguation. In ACL Siglex Workshop on Tagging Text with Lexical Semantics, Why, What and How?, 52-57. Washington, USA.",
"links": null
},
"BIBREF15": {
"ref_id": "b15",
"title": "The Word-Space Model: Using Distributional Analysis to Represent Syntagmatic and Paradigmatic Relations between Words in High-Dimensional Vector Spaces",
"authors": [
{
"first": "Magnus",
"middle": [],
"last": "Sahlgren",
"suffix": ""
}
],
"year": 2006,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sahlgren, Magnus (2006). The Word-Space Model: Using Distributional Analysis to Represent Syn- tagmatic and Paradigmatic Relations between Words in High-Dimensional Vector Spaces. Ph.D thesis",
"links": null
},
"BIBREF16": {
"ref_id": "b16",
"title": "Experiments on the Automatic Induction of German Semantic Verb Classes",
"authors": [
{
"first": "Sabine",
"middle": [],
"last": "Schulte Im Walde",
"suffix": ""
}
],
"year": 2006,
"venue": "Computational Linguistics",
"volume": "32",
"issue": "2",
"pages": "159--194",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Schulte im Walde, Sabine (2006). Experiments on the Automatic Induction of German Semantic Verb Classes. Computational Linguistics 32(2): 159-194.",
"links": null
},
"BIBREF17": {
"ref_id": "b17",
"title": "Parsing English with a Link Grammar",
"authors": [
{
"first": "Daniel",
"middle": [],
"last": "Sleator",
"suffix": ""
},
{
"first": "Davy",
"middle": [],
"last": "Temperley",
"suffix": ""
}
],
"year": 1991,
"venue": "",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Sleator, Daniel and Davy Temperley (1991). Parsing English with a Link Grammar, Carnegie Mellon University.",
"links": null
},
"BIBREF18": {
"ref_id": "b18",
"title": "Measures and Applications of Lexical Distributional Similarity",
"authors": [
{
"first": "Julie",
"middle": [
"Elizabeth"
],
"last": "Weeds",
"suffix": ""
},
{
"first": ";",
"middle": [],
"last": "Dongqiang",
"suffix": ""
},
{
"first": "David",
"middle": [
"M W"
],
"last": "Powers",
"suffix": ""
}
],
"year": 2003,
"venue": "the Twenty-Eighth Australasian Computer Science Conference (ACSC2005)",
"volume": "",
"issue": "",
"pages": "315--322",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Weeds, Julie Elizabeth (2003). Measures and Appli- cations of Lexical Distributional Similarity. Ph.D thesis Yang, Dongqiang and David M.W. Powers (2005). Measuring Semantic Similarity in the Taxonomy of WordNet. In the Twenty-Eighth Australasian Computer Science Conference (ACSC2005), 315- 322. Newcastle, Australia, ACS.",
"links": null
},
"BIBREF19": {
"ref_id": "b19",
"title": "Verb Similarity on the Taxonomy of WordNet",
"authors": [
{
"first": "Dongqiang",
"middle": [],
"last": "Yang",
"suffix": ""
},
{
"first": "David",
"middle": [
"M W"
],
"last": "Powers",
"suffix": ""
}
],
"year": 2006,
"venue": "the 3rd International WordNet Conference (GWC-06)",
"volume": "",
"issue": "",
"pages": "121--128",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yang, Dongqiang and David M.W. Powers (2006). Verb Similarity on the Taxonomy of WordNet. In the 3rd International WordNet Conference (GWC- 06), 121-128. Jeju Island, Korea.",
"links": null
},
"BIBREF20": {
"ref_id": "b20",
"title": "One Sense per Collocation",
"authors": [
{
"first": "David",
"middle": [],
"last": "Yarowsky",
"suffix": ""
}
],
"year": 1993,
"venue": "ARPA Human Language Technology Workshop",
"volume": "",
"issue": "",
"pages": "266--271",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Yarowsky, David (1993). One Sense per Collocation. In ARPA Human Language Technology Work- shop, 266-271. Princeton, New Jersey.",
"links": null
},
"BIBREF21": {
"ref_id": "b21",
"title": "Human Behavior and the Principle of Least Effort: an Introduction to",
"authors": [
{
"first": "George",
"middle": [],
"last": "Zipf",
"suffix": ""
},
{
"first": "",
"middle": [],
"last": "Kingsley",
"suffix": ""
}
],
"year": 1965,
"venue": "Human Ecology. N.Y",
"volume": "",
"issue": "",
"pages": "",
"other_ids": {},
"num": null,
"urls": [],
"raw_text": "Zipf, George Kingsley (1965). Human Behavior and the Principle of Least Effort: an Introduction to Human Ecology. N.Y., Hafner Pub. Co.",
"links": null
}
},
"ref_entries": {
"FIGREF0": {
"text": "The correlations on '65 nouns'rV X Vo X sV X All X SimWN 0.59 0.49 0.41 0.57 0.84 r \u03a1 0.51 0.44 0.38 0.53 0.77 (b) The correlations on '130 verbs'",
"type_str": "figure",
"uris": null,
"num": null
},
"TABREF0": {
"html": null,
"text": "O: verbs and their direct/indirect objects 2. OT: verbs and their time objects 3. P: verbs and their complements such as adjectives and passive participles",
"num": null,
"type_str": "table",
"content": "<table><tr><td>\u2022 AN</td></tr><tr><td>1. A: nouns and their adjective pre-</td></tr><tr><td>modifiers</td></tr><tr><td>2. AN: nouns and their noun pre-modifiers</td></tr><tr><td>3. GN: common nouns and their proper</td></tr><tr><td>nouns e.g. Prime Minister Howard.</td></tr><tr><td>4. M: nouns and their various post-</td></tr><tr><td>modifiers such as prepositional phrases,</td></tr><tr><td>adjectives, and participles</td></tr><tr><td>\u2022 SV</td></tr><tr><td>1. S: subject-nouns/gerunds and their finite</td></tr><tr><td>verbs. There are also some sub-link types</td></tr><tr><td>under S, for example, Ss*g stands for</td></tr><tr><td>gerunds and their predicates, and Sp plu-</td></tr><tr><td>ral nouns and their plural verbs</td></tr><tr><td>2. SI: the inversion of subjects and their</td></tr><tr><td>verbs in questions</td></tr><tr><td>\u2022 VO</td></tr><tr><td>1.</td></tr></table>"
},
"TABREF2": {
"html": null,
"text": "",
"num": null,
"type_str": "table",
"content": "<table><tr><td>: A sample of the distributional</td></tr><tr><td>'thesauri'</td></tr></table>"
}
}
}
} |