{
    "paper_id": "S13-1013",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T15:43:04.179050Z"
    },
    "title": "HENRY-CORE: Domain Adaptation and Stacking for Text Similarity *",
    "authors": [
        {
            "first": "Michael",
            "middle": [],
            "last": "Heilman",
            "suffix": "",
            "affiliation": {},
            "email": "mheilman@ets.org"
        },
        {
            "first": "Nitin",
            "middle": [],
            "last": "Madnani",
            "suffix": "",
            "affiliation": {},
            "email": "nmadnani@ets.org"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper describes a system for automatically measuring the semantic similarity between two texts, which was the aim of the 2013 Semantic Textual Similarity (STS) task (Agirre et al., 2013). For the 2012 STS task, Heilman and Madnani (2012) submitted the PERP system, which performed competitively in relation to other submissions. However, approaches including word and n-gram features also performed well (B\u00e4r et al., 2012; Sari\u0107 et al., 2012), and the 2013 STS task focused more on predicting similarity for text pairs from new domains. Therefore, for the three variations of our system that we were allowed to submit, we used stacking (Wolpert, 1992) to combine PERP with word and ngram features and applied the domain adaptation approach outlined by Daume III (2007) to facilitate generalization to new domains. Our submissions performed well at most subtasks, particularly at measuring the similarity of news headlines, where one of our submissions ranked 2nd among 89 from 34 teams, but there is still room for improvement. * System description papers for this task were required to have a team ID and task ID (e.g., \"HENRY-CORE\") as a prefix.",
    "pdf_parse": {
        "paper_id": "S13-1013",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper describes a system for automatically measuring the semantic similarity between two texts, which was the aim of the 2013 Semantic Textual Similarity (STS) task (Agirre et al., 2013). For the 2012 STS task, Heilman and Madnani (2012) submitted the PERP system, which performed competitively in relation to other submissions. However, approaches including word and n-gram features also performed well (B\u00e4r et al., 2012; Sari\u0107 et al., 2012), and the 2013 STS task focused more on predicting similarity for text pairs from new domains. Therefore, for the three variations of our system that we were allowed to submit, we used stacking (Wolpert, 1992) to combine PERP with word and ngram features and applied the domain adaptation approach outlined by Daume III (2007) to facilitate generalization to new domains. Our submissions performed well at most subtasks, particularly at measuring the similarity of news headlines, where one of our submissions ranked 2nd among 89 from 34 teams, but there is still room for improvement. * System description papers for this task were required to have a team ID and task ID (e.g., \"HENRY-CORE\") as a prefix.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "We aim to develop an automatic measure of the semantic similarity between two short texts (e.g., sentences). Such a measure could be useful for various applications, including automated short answer scoring (Leacock and Chodorow, 2003; Nielsen et al., 2008) , question answering (Wang et al., 2007) , and machine translation evaluation (Przybocki et al., 2009) .",
                "cite_spans": [
                    {
                        "start": 207,
                        "end": 235,
                        "text": "(Leacock and Chodorow, 2003;",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 236,
                        "end": 257,
                        "text": "Nielsen et al., 2008)",
                        "ref_id": "BIBREF12"
                    },
                    {
                        "start": 279,
                        "end": 298,
                        "text": "(Wang et al., 2007)",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 336,
                        "end": 360,
                        "text": "(Przybocki et al., 2009)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this paper, we describe our submissions to the 2013 Semantic Textual Similarity (STS) task (Agirre et al., 2013) , which evaluated implementations of text-to-text similarity measures. Submissions were evaluated according to Pearson correlations between gold standard similarity values acquired from human raters and machine-produced similarity values. Teams were allowed to submit up to three submissions. For each submission, correlations were calculated separately for four subtasks: measuring similarity between news headlines (\"headlines\"), between machine translation outputs and human reference translations (\"SMT\"), between word glosses from OntoNotes (Pradhan and Xue, 2009) and WordNet (Fellbaum, 1998) (\"OnWN\") , and between frame descriptions from FrameNet (Fillmore et al., 2003) and glosses from WordNet (\"FNWN\"). A weighted mean of the correlations was also computed as an overall evaluation metric (the OnWn and FNWN datasets were smaller than the headlines and SMT datasets).",
                "cite_spans": [
                    {
                        "start": 94,
                        "end": 115,
                        "text": "(Agirre et al., 2013)",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 662,
                        "end": 685,
                        "text": "(Pradhan and Xue, 2009)",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 698,
                        "end": 714,
                        "text": "(Fellbaum, 1998)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 715,
                        "end": 723,
                        "text": "(\"OnWN\")",
                        "ref_id": null
                    },
                    {
                        "start": 771,
                        "end": 794,
                        "text": "(Fillmore et al., 2003)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The suggested training data for the 2013 STS task was the data from the 2012 STS task (Agirre et al., 2012) , including both the training and test sets for that year. The 2012 task was similar except that the data were from a different set of subtasks: measuring similarity between sentences from the Microsoft Research Paraphrase corpus (Dolan et al., 2004) (\"MSRpar\") , between sentences from the Microsoft Research Video Description corpus (Chen and Dolan, 2011) (\"MSRvid\") , and between human and machine translations of parliamentary proceedings (\"SMTeuroparl\"). The 2012 task provided training and test sets for those three subtasks and also included two additional tasks with just test sets: a similar OnWN task, and measuring similarity between human and machine translations of news broadcasts (\"SMTnews\").",
                "cite_spans": [
                    {
                        "start": 86,
                        "end": 107,
                        "text": "(Agirre et al., 2012)",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 338,
                        "end": 369,
                        "text": "(Dolan et al., 2004) (\"MSRpar\")",
                        "ref_id": null
                    },
                    {
                        "start": 443,
                        "end": 465,
                        "text": "(Chen and Dolan, 2011)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 466,
                        "end": 476,
                        "text": "(\"MSRvid\")",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Heilman and Madnani (2012) described the PERP system and submitted it to the 2012 STS task. PERP measures the similarity of a sentence pair by finding a sequence of edit operations (e.g., insertions, deletions, substitutions, and shifts) that converts one sentence to the other. It then uses various features of the edits, with weights learned from labeled sentence pairs, to assign a similarity score. PERP performed well, ranking 7th out of 88 submissions from 35 teams according to the weighted mean correlation. However, PERP lacked some of the useful word and n-gram overlap features included in some of the other top-performing submissions. In addition, domain adaptation seemed more relevant for the STS 2013 task since in-domain data was available only for one (OnWN) of the four subtasks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Therefore, in this work, we combine the PERP system with various word and n-gram features. We also apply the domain adaptation technique of Daume III (2007) to support generalization beyond the domains in the training data.",
                "cite_spans": [
                    {
                        "start": 140,
                        "end": 156,
                        "text": "Daume III (2007)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In this section, we describe the system we developed, and the variations of it that comprise our submissions to the 2013 STS task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System Details",
                "sec_num": "2"
            },
            {
                "text": "Our system is a linear model estimated using ridge regression, as implemented in the scikit-learn toolkit (Pedregosa et al., 2011) . The system uses a 5-fold cross-validation grid search to tune the \u03b1 penalty for ridge regression (with \u03b1 \u2208 2 {\u22125,\u22124,...,4} ). During development, we evaluated its performance on the full STS 2012 data (training and test) using 10-fold cross-validation, with the 5-fold crossvalidation being used to tune within each training partition.",
                "cite_spans": [
                    {
                        "start": 106,
                        "end": 130,
                        "text": "(Pedregosa et al., 2011)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "System Details",
                "sec_num": "2"
            },
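A minimal sketch (an editorial illustration, not the authors' code) of the model selection described above: ridge regression with a 5-fold cross-validation grid search over the \u03b1 penalty. The feature matrix X_train and gold similarity scores y_train are assumed to exist.

```python
# Ridge regression with a 5-fold CV grid search over alpha in 2^{-5..4},
# mirroring the setup described in the paragraph above.
from sklearn.linear_model import Ridge
from sklearn.model_selection import GridSearchCV

param_grid = {"alpha": [2.0 ** k for k in range(-5, 5)]}
search = GridSearchCV(Ridge(), param_grid, cv=5)
# search.fit(X_train, y_train)          # X_train: standardized feature matrix
# predictions = search.predict(X_test)  # machine similarity scores
```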
            {
                "text": "Our full system uses the following features computed from an input sentence pair (s 1 , s 2 ).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features",
                "sec_num": "2.1"
            },
            {
                "text": "The system standardizes feature values to zero mean and unit variance by subtracting the feature's mean and dividing by its standard deviation. The means and standard deviations are estimated from the training set, or from each training partition during cross-validation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features",
                "sec_num": "2.1"
            },
            {
                "text": "The system computes Jaccard similarity (i.e., the ratio of the sizes of the set intersection to the set union) for the following overlap features:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "n-gram Overlap Features",
                "sec_num": "2.1.1"
            },
            {
                "text": "\u2022 character n-gram overlap (n = 1 . . . 12). Note that this is computed from the entire original texts for a pair, including punctuation, whitespace, etc.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "n-gram Overlap Features",
                "sec_num": "2.1.1"
            },
            {
                "text": "\u2022 word n-gram overlap (n = 2 . . . 8). We do not include n = 1 here because it would be identical to the n = 1 version for the unordered word n-gram feature described next.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "n-gram Overlap Features",
                "sec_num": "2.1.1"
            },
            {
                "text": "\u2022 unordered word n-gram overlap features (n = 1 . . . 3). By unordered, we mean combinations (in the mathematical sense of \"combinations\") of word tokens, regardless of order. Note that these features are similar to the word n-gram overlap features except that the words need not be contiguous to match. For example, the text \"John saw Mary\" would result in the following unordered word n-grams: {john}, {mary}, {saw}, {john, saw}, {mary, saw}, {john, mary}, and {john, mary, saw}.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "n-gram Overlap Features",
                "sec_num": "2.1.1"
            },
            {
                "text": "For the word and unordered n-gram overlap features, we computed two variants: one based on all tokens and one based on just content words, which we define as words that are not punctuation and do not appear in the NLTK (Bird et al., 2009) English stopword list. We lowercase everything for the word overlap measures but not for character overlap.",
                "cite_spans": [
                    {
                        "start": 219,
                        "end": 238,
                        "text": "(Bird et al., 2009)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "n-gram Overlap Features",
                "sec_num": "2.1.1"
            },
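An illustrative sketch of the three overlap families above (Jaccard similarity over character n-grams, contiguous word n-grams, and unordered word n-grams), assuming simple whitespace tokenization:

```python
from itertools import combinations

def jaccard(a, b):
    # Ratio of the sizes of the set intersection to the set union.
    return len(a & b) / len(a | b) if (a | b) else 0.0

def char_ngrams(text, n):
    # Computed over the raw text, including punctuation and whitespace.
    return {text[i:i + n] for i in range(len(text) - n + 1)}

def word_ngrams(tokens, n):
    return {tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1)}

def unordered_ngrams(tokens, n):
    # Combinations of tokens regardless of order, canonicalized by sorting.
    return {tuple(sorted(c)) for c in combinations(tokens, n)}

s1, s2 = "John saw Mary", "Mary saw John"
t1, t2 = s1.lower().split(), s2.lower().split()
print(jaccard(char_ngrams(s1, 3), char_ngrams(s2, 3)))
print(jaccard(word_ngrams(t1, 2), word_ngrams(t2, 2)))            # order-sensitive: 0.0
print(jaccard(unordered_ngrams(t1, 2), unordered_ngrams(t2, 2)))  # order-free: 1.0
```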
            {
                "text": "The system includes various length-related features, where",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "L max = max(length(s 1 ), length(s 2 )), L min = min(length(s 1 ), length(s 2 ))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": ", and length(x) denotes the number of tokens in x. log denotes the natural logarithm.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "\u2022 log( Lmax L min ) \u2022 Lmax\u2212L min Lmax \u2022 log(L min ) \u2022 log(L max ) \u2022 log(|L max \u2212 L min | + 1) 2.1.3 Sentiment Features",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
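A short sketch of these length features under the stated definitions (token counts, natural logarithm); sentences are assumed non-empty so the logarithms are defined:

```python
import math

def length_features(tokens1, tokens2):
    l_max = max(len(tokens1), len(tokens2))
    l_min = min(len(tokens1), len(tokens2))
    return [
        math.log(l_max / l_min),           # log(L_max / L_min)
        (l_max - l_min) / l_max,           # (L_max - L_min) / L_max
        math.log(l_min),                   # log(L_min)
        math.log(l_max),                   # log(L_max)
        math.log(abs(l_max - l_min) + 1),  # log(|L_max - L_min| + 1)
    ]

print(length_features("John saw Mary".split(), "Mary was seen by John".split()))
```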
            {
                "text": "The system includes various features based on the proprietary sentiment lexicon described by Beigman Klebanov et al. (2012) . Each word in this lexicon is associated with a 3-tuple specifying a distribution over three classes: positive, negative, and neutral. These distributions were estimated via crowdsourcing. If a word is not in the lexicon, we assume its positivity and negativity are zero.",
                "cite_spans": [
                    {
                        "start": 101,
                        "end": 123,
                        "text": "Klebanov et al. (2012)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "We define the set of sentiment words in a sentence s as \u03c3(s) = {w : positivity(w) > 0.5 \u2228 negativity(w) > 0.5}. We also define the positivity, negativity, and neutrality of a sentence as the sum over the corresponding values of individual words w. For example, positivity(s) = w\u2208s positivity(w). The system includes the following features:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "\u2022 \u03c3(s 1 )\u2229\u03c3(s 2 )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "\u03c3(s 1 )\u222a\u03c3(s 2 ) (i.e., the Jaccard similarity of the sentiment words)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "\u2022 The cosine distance between (positivity(s 1 ), negativity(s 1 )) and (positivity(s 2 ), negativity(s 2 ))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
            {
                "text": "\u2022 |positivity(s 1 ) \u2212 positivity(s 2 )| \u2022 |negativity(s 1 ) \u2212 negativity(s 2 )| \u2022 |neutrality(s 1 ) \u2212 neutrality(s 2 )|",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Length Features",
                "sec_num": "2.1.2"
            },
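The lexicon itself is proprietary, so the sketch below uses a tiny hypothetical stand-in; it shows how \u03c3(s) and the per-sentence sums defined above yield the listed features:

```python
# Hypothetical (positive, negative, neutral) distributions standing in for the
# proprietary lexicon; words not in the lexicon get zero positivity/negativity.
LEXICON = {"good": (0.9, 0.05, 0.05), "awful": (0.05, 0.9, 0.05)}

def dist(word):
    return LEXICON.get(word, (0.0, 0.0, 1.0))

def sigma(tokens):
    # sigma(s) = {w : positivity(w) > 0.5 or negativity(w) > 0.5}
    return {w for w in tokens if dist(w)[0] > 0.5 or dist(w)[1] > 0.5}

def positivity(tokens):
    return sum(dist(w)[0] for w in tokens)

def negativity(tokens):
    return sum(dist(w)[1] for w in tokens)

s1, s2 = "a good day".split(), "an awful day".split()
union = sigma(s1) | sigma(s2)
print(len(sigma(s1) & sigma(s2)) / len(union) if union else 0.0)  # Jaccard
print(abs(positivity(s1) - positivity(s2)))
print(abs(negativity(s1) - negativity(s2)))
```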
            {
                "text": "The system also incorporates the PERP system (Heilman and Madnani, 2012) (as briefly described in \u00a71) as a feature in its model by using 10-fold stacking (Wolpert, 1992) . Stacking is a procedure similar to k-fold cross-validation that allows one to use the output of one model as the input to another model, without requiring multiple training sets. A PERP model is iteratively trained on nine folds and then the PERP feature is computed for the tenth, producing PERP features for the whole training set, which are then used in the final regression model.",
                "cite_spans": [
                    {
                        "start": 154,
                        "end": 169,
                        "text": "(Wolpert, 1992)",
                        "ref_id": "BIBREF19"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
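A sketch of the 10-fold stacking step using scikit-learn's cross_val_predict, with an arbitrary regressor standing in for PERP (PERP itself is not a scikit-learn estimator):

```python
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.model_selection import cross_val_predict

def add_stacked_feature(first_stage, X, y, n_folds=10):
    # Out-of-fold predictions: the first-stage model is trained on nine folds
    # and scores the tenth, so no training example is scored by a model that
    # saw it, mirroring the stacking procedure described above.
    oof = cross_val_predict(first_stage, X, y, cv=n_folds)
    return np.hstack([X, oof.reshape(-1, 1)])

# X_stacked = add_stacked_feature(Ridge(), X, y)  # Ridge() stands in for PERP
# final_model = Ridge().fit(X_stacked, y)
```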
            {
                "text": "We trained PERP in a general manner using data from all the STS 2012 subtasks rather than training subtask-specific models. PERP was trained for 100 iterations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "We refer readers to Heilman and Madnani (2012) for a full description of PERP. Next, we provide details about modifications made to PERP since STS 2012. Although these details are not necessary to understand how the system works in general, we include them here for completeness.",
                "cite_spans": [
                    {
                        "start": 20,
                        "end": 46,
                        "text": "Heilman and Madnani (2012)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 We extended PERP to model abbreviations as zero cost edits, using a list of common abbreviations extracted from Wikipedia. 1",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 In a similar vein, we also extended PERP to model multiword sequences with differing punctuation (e.g., \"Built-In Test\" \u2192 \"Built In Test\") as zero cost edits.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 We changed the stemming and synonymy edits in the original PERP (Heilman and Madnani, 2012) to be substitution edits that activate additional stemming and synonymy indicator features.",
                "cite_spans": [
                    {
                        "start": 66,
                        "end": 93,
                        "text": "(Heilman and Madnani, 2012)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 We added an incentive to TERp's (Snover et al., 2009) original inference algorithm to prefer matching words when searching for a good edit sequence. We added this to avoid rare cases where other edits would have a negative costs, and then the same word in a sentence pair would be, for example inserted and deleted rather than matched.",
                "cite_spans": [
                    {
                        "start": 34,
                        "end": 55,
                        "text": "(Snover et al., 2009)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 We fixed a minor bug in the inference algorithm, which appeared to only affect results on the MSRvid subtask in the STS 2012 task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "\u2022 We tweaked the learning algorithm by increasing the learning rate and not performing weight averaging.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "PERP with Stacking",
                "sec_num": "2.1.4"
            },
            {
                "text": "The system also uses the domain adaptation technique described by Daume III (2007) to facilitate generalization to new domains. Instead of having a single weight for each of the features described above, the system maintains a generic and a subtaskspecific copy. For example, the content bigram overlap feature had six copies: a generic copy and one for each of the five subtasks in the training data from STS 2012 (i.e., OnWN, MSRpar, MSRvid, SMTeuroparl, SMTnews). And then for an instance from MSRpar, only the generic and MSRpar-specific versions of the feature will be active. For an instance from a new subtask (e.g., a test set instance), only the generic feature will be active. We also included a generic intercept feature and intercept features for each subtask (these always had a value of 1). These help the model capture, for example, whether high or low similarities are more frequent in general, without having to use the other feature weights to do so.",
                "cite_spans": [
                    {
                        "start": 66,
                        "end": 82,
                        "text": "Daume III (2007)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Domain Adaptation",
                "sec_num": "2.2"
            },
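A compact sketch of this feature-augmentation scheme on dictionary-valued feature vectors (the feature and subtask names are illustrative):

```python
def augment(features, subtask, known_subtasks):
    # Generic copy of every feature, plus a generic intercept ...
    out = {"generic::" + name: value for name, value in features.items()}
    out["generic::intercept"] = 1.0
    # ... and subtask-specific copies only when the subtask was seen in training.
    if subtask in known_subtasks:
        for name, value in features.items():
            out[subtask + "::" + name] = value
        out[subtask + "::intercept"] = 1.0
    return out

KNOWN = {"OnWN", "MSRpar", "MSRvid", "SMTeuroparl", "SMTnews"}
print(augment({"content_bigram_overlap": 0.4}, "MSRpar", KNOWN))
print(augment({"content_bigram_overlap": 0.4}, "headlines", KNOWN))  # generic only
```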
            {
                "text": "We submitted three variations of the system.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submissions",
                "sec_num": "2.3"
            },
            {
                "text": "\u2022 Run 1: This run used all the features described above. In addition, we mapped the test subtasks to the training subtasks as follows so that the specific features would be active for test data from previously unseen but related subtasks: headlines to MSRpar, SMT to SMTnews, and FNWN to OnWN.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submissions",
                "sec_num": "2.3"
            },
            {
                "text": "\u2022 Run 2: As in Run 1, this run used all the features described above. However, we did not map the STS 2013 subtasks to STS 2012 subtasks. Thus, the specific copies of features were only active for OnWN test set examples.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submissions",
                "sec_num": "2.3"
            },
            {
                "text": "\u2022 Run 3: This run used all the features except for the PERP and sentiment features. Like Run 2, this run did not map subtasks.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submissions",
                "sec_num": "2.3"
            },
            {
                "text": "This section presents results on the STS 2012 data (our development set) and results for our submissions to STS 2013.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results",
                "sec_num": "3"
            },
            {
                "text": "Although we used cross-validation on the entire STS 2012 dataset during preliminary experiments ( \u00a72), in this section, we train the system on the original STS 2012 training set and report performance on the original STS 2012 test set, in order to facilitate comparison to submissions to that task. It is important to note that our system's results here may be somewhat optimistic since we had access to the STS 2012 test data and were using it for development, whereas the participants in the 2012 task only had access to the training data. Table 1 presents the results. We include the results for our three submissions, the results for the topranked submission according to the weighted mean (\"UKP\"), the results for the best submission from Heilman and Madnani (2012) (\"PERPphrases\"), and the mean across all submissions. Note that while we compare to the PERP submission from Heilman and Madnani (2012) , the results are not directly comparable since the version of PERP is not the same and since PERP was trained differently.",
                "cite_spans": [
                    {
                        "start": 880,
                        "end": 906,
                        "text": "Heilman and Madnani (2012)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 542,
                        "end": 549,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "STS 2012 (development set)",
                "sec_num": "3.1"
            },
            {
                "text": "For Run 1 on the STS 2012 data, we mapped OnWN to MSRpar, and SMTnews to SMTeuroparl, similar to Heilman and Madnani (2012) . Table 2 presents results for our submissions to the 2013 STS task. We include results for our three submissions, results for the top-ranked submission according to the weighted mean, results for the baseline provided by the task organizers, and the mean across all submissions and the baseline from the organizers. 2 Note that while our Run 2 submission outperformed the top-ranked UMBC submission on the headlines subtask, as shown in 2, there was another UMBC submission that performed better than Run 2 for the headlines subtask.",
                "cite_spans": [
                    {
                        "start": 97,
                        "end": 123,
                        "text": "Heilman and Madnani (2012)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 441,
                        "end": 442,
                        "text": "2",
                        "ref_id": null
                    }
                ],
                "ref_spans": [
                    {
                        "start": 126,
                        "end": 133,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "STS 2012 (development set)",
                "sec_num": "3.1"
            },
            {
                "text": "The weighted mean correlation across tasks for our submissions was relatively poor compared to the top-ranked systems for STS 2013: our Run 1, Run 2, and Run 3 submissions beat the baseline and ranked 41st, 26th, and 48th, respectively, out of 89 submissions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4"
            },
            {
                "text": "The primary reason for this result is that performance of our submissions was poor for the OnWN subtask, where, e.g., our Run 2 submission's correlation was r = .4631, compared to r = .8431 for the top-ranked submission for that subtask (\"deftbaseline\"). Upon investigation, we found that OnWN training and test data were very different in terms of their score distributions. Table 1 : Pearson correlations for STS 2012 data for each subtask and then the weighted mean across subtasks. \"UKP\" was submitted by B\u00e4r et al. (2012) , \"PERPphrases\" was submitted by Heilman and Madnani (2012) , and \"mean-2012\" is the mean of all submissions to STS 2012. Table 2 : Pearson correlations for STS 2013 data for each subtask and then the weighted mean across subtasks. \"UMBC\" = \"UMBC EBIQUITY-ParingWords\", and \"mean-2013\" is the mean of the submissions to STS 2013 and the baseline.",
                "cite_spans": [
                    {
                        "start": 509,
                        "end": 526,
                        "text": "B\u00e4r et al. (2012)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 560,
                        "end": 586,
                        "text": "Heilman and Madnani (2012)",
                        "ref_id": "BIBREF10"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 376,
                        "end": 383,
                        "text": "Table 1",
                        "ref_id": null
                    },
                    {
                        "start": 649,
                        "end": 656,
                        "text": "Table 2",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Discussion",
                "sec_num": "4"
            },
            {
                "text": "standard similarity value for the STS 2012 OnWN data was 3.87 (with a standard deviation of 1.02), while the mean for the 2013 OnWN data was 2.31 (with a standard deviation of 1.76). We speculate that our system performed relatively poorly because it was expecting the OnWN data to include many highly similar sentences (as in the 2012 data). We hypothesize that incorporating more detailed Word-Net information (only the PERP feature used Word-Net, and only in a limited fashion, to check synonymy) and task-specific features for comparing definitions might have helped performance for the OnWN subtask. If we ignore the definition comparison subtasks, and consider performance on just the headlines and SMT subtasks, the system performed quite well. Our Run 2 submission had a mean correlation of r = .5619 for those two subtasks, which would rank 5th among all submissions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submission",
                "sec_num": null
            },
            {
                "text": "We have not fully explored the effects on performance of the domain adaptation approach used in the system, but our approach of mapping tasks used for our Run 1 submission did not seem to help. It seems better to keep a general model, as in Runs 2 and 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submission",
                "sec_num": null
            },
            {
                "text": "Additionally, we observe that the performance of Run 3, which did not use the PERP and sentiment features, was relatively good compared to Runs 1 and 2, which used all the features. This indicates that if speed and implementation simplicity are important concerns for an application, it may suffice to use relatively simple overlap and length features to measure semantic similarity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submission",
                "sec_num": null
            },
            {
                "text": "The contribution of domain adaptation is not clear. Mapping novel subtasks to tasks for which training data is available ( \u00a72.3), in combination with the domain adaptation technique we used, did not generally improve performance. However, we leave to future work a detailed analysis of whether the domain adaptation approach (without mapping) is better than simply training a separate system for each subtask and using out-of-domain data when indomain data is unavailable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Submission",
                "sec_num": null
            },
            {
                "text": "In this paper, we described a system for predicting the semantic similarity of two short texts. The system uses stacking to combine a trained edit-based similarity model (Heilman and Madnani, 2012) with simple features such as word and n-gram overlap, and it uses the technique described by Daume III (2007) to support generalization to domains not represented in the training data. We also presented evaluation results, using data from the STS 2012 and STS 2013 shared tasks, that indicate that the system performs competitively relative to other approaches for many tasks. In particular, we observed very good performance on the news headline similarity and MT evaluation subtasks of the STS 2013 shared task.",
                "cite_spans": [
                    {
                        "start": 170,
                        "end": 197,
                        "text": "(Heilman and Madnani, 2012)",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 291,
                        "end": 307,
                        "text": "Daume III (2007)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "http://en.wikipedia.org/wiki/List_of_ acronyms_and_initialisms, downloaded April 27, 2012",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "The STS 2013 results are from http://ixa2.si. ehu.es/sts/.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We would like to thank the STS 2013 task organizers for facilitating this research and Dan Blanchard for helping with scikit-learn.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Semeval-2012 task 6: A pilot on semantic textual similarity",
                "authors": [
                    {
                        "first": "Eneko",
                        "middle": [],
                        "last": "Agirre",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Cer",
                        "suffix": ""
                    },
                    {
                        "first": "Mona",
                        "middle": [],
                        "last": "Diab",
                        "suffix": ""
                    },
                    {
                        "first": "Aitor",
                        "middle": [],
                        "last": "Gonzalez-Agirre",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics",
                "volume": "1",
                "issue": "",
                "pages": "7--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, and Aitor Gonzalez-Agirre. 2012. Semeval-2012 task 6: A pilot on semantic textual similarity. In *SEM 2012: The First Joint Conference on Lexical and Computa- tional Semantics -Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Pro- ceedings of the Sixth International Workshop on Se- mantic Evaluation (SemEval 2012), pages 385-393, Montr\u00e9al, Canada, 7-8 June. Association for Compu- tational Linguistics.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "*sem 2013 shared task: Semantic textual similarity, including a pilot on typed-similarity",
                "authors": [
                    {
                        "first": "Eneko",
                        "middle": [],
                        "last": "Agirre",
                        "suffix": ""
                    },
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "Cer",
                        "suffix": ""
                    },
                    {
                        "first": "Mona",
                        "middle": [],
                        "last": "Diab",
                        "suffix": ""
                    },
                    {
                        "first": "Aitor",
                        "middle": [],
                        "last": "Gonzalez-Agirre",
                        "suffix": ""
                    },
                    {
                        "first": "Weiwei",
                        "middle": [],
                        "last": "Guo",
                        "suffix": ""
                    }
                ],
                "year": 2013,
                "venue": "*SEM 2013: The Second Joint Conference on Lexical and Computational Semantics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Eneko Agirre, Daniel Cer, Mona Diab, Aitor Gonzalez- Agirre, and Weiwei Guo. 2013. *sem 2013 shared task: Semantic textual similarity, including a pilot on typed-similarity. In *SEM 2013: The Second Joint Conference on Lexical and Computational Semantics. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "UKP: Computing semantic textual similarity by combining multiple content similarity measures",
                "authors": [
                    {
                        "first": "Daniel",
                        "middle": [],
                        "last": "B\u00e4r",
                        "suffix": ""
                    },
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Biemann",
                        "suffix": ""
                    },
                    {
                        "first": "Iryna",
                        "middle": [],
                        "last": "Gurevych",
                        "suffix": ""
                    },
                    {
                        "first": "Torsten",
                        "middle": [],
                        "last": "Zesch",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics",
                "volume": "1",
                "issue": "",
                "pages": "7--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daniel B\u00e4r, Chris Biemann, Iryna Gurevych, and Torsten Zesch. 2012. UKP: Computing semantic textual simi- larity by combining multiple content similarity mea- sures. In *SEM 2012: The First Joint Conference on Lexical and Computational Semantics -Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth Inter- national Workshop on Semantic Evaluation (SemEval 2012), pages 435-440, Montr\u00e9al, Canada, 7-8 June. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Building sentiment lexicon(s) from scratch for essay data",
                "authors": [
                    {
                        "first": "Jill",
                        "middle": [],
                        "last": "Beata Beigman Klebanov",
                        "suffix": ""
                    },
                    {
                        "first": "Nitin",
                        "middle": [],
                        "last": "Burstein",
                        "suffix": ""
                    },
                    {
                        "first": "Adam",
                        "middle": [],
                        "last": "Madnani",
                        "suffix": ""
                    },
                    {
                        "first": "Joel",
                        "middle": [],
                        "last": "Faulkner",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Tetreault",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "Proceedings of the 13th International Conference on Intelligent Text Processing and Computational Linguistics (CICLing)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Beata Beigman Klebanov, Jill Burstein, Nitin Madnani, Adam Faulkner, and Joel Tetreault. 2012. Build- ing sentiment lexicon(s) from scratch for essay data. In Proceedings of the 13th International Conference on Intelligent Text Processing and Computational Lin- guistics (CICLing), New Delhi, India, March.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Natural Language Processing with Python",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Bird",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Klein",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Loper",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Bird, E. Klein, and E. Loper. 2009. Natural Language Processing with Python. O'Reilly Media.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Collecting highly parallel data for paraphrase evaluation",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Chen",
                        "suffix": ""
                    },
                    {
                        "first": "William",
                        "middle": [],
                        "last": "Dolan",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Proceedings of the 49th Annual Meeting of the Association for Computational Linguistics: Human Language Technologies",
                "volume": "",
                "issue": "",
                "pages": "190--200",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Chen and William Dolan. 2011. Collecting highly parallel data for paraphrase evaluation. In Pro- ceedings of the 49th Annual Meeting of the Associa- tion for Computational Linguistics: Human Language Technologies, pages 190-200, Portland, Oregon, USA, June. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Frustratingly easy domain adaptation",
                "authors": [
                    {
                        "first": "Hal",
                        "middle": [],
                        "last": "Daume",
                        "suffix": "III"
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "256--263",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hal Daume III. 2007. Frustratingly easy domain adapta- tion. In Proceedings of the 45th Annual Meeting of the Association of Computational Linguistics, pages 256-263, Prague, Czech Republic, June. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Unsupervised construction of large paraphrase corpora: Exploiting massively parallel news sources",
                "authors": [
                    {
                        "first": "Bill",
                        "middle": [],
                        "last": "Dolan",
                        "suffix": ""
                    },
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Quirk",
                        "suffix": ""
                    },
                    {
                        "first": "Chris",
                        "middle": [],
                        "last": "Brockett",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "Proceedings of Coling",
                "volume": "",
                "issue": "",
                "pages": "350--356",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Bill Dolan, Chris Quirk, and Chris Brockett. 2004. Un- supervised construction of large paraphrase corpora: Exploiting massively parallel news sources. In Pro- ceedings of Coling 2004, pages 350-356, Geneva, Switzerland, Aug 23-Aug 27. COLING.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "WordNet: An Electronic Lexical Database",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Fellbaum",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Fellbaum. 1998. WordNet: An Electronic Lexical Database. Bradford Books.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Background to Framenet",
                "authors": [
                    {
                        "first": "Charles",
                        "middle": [
                            "J"
                        ],
                        "last": "Fillmore",
                        "suffix": ""
                    },
                    {
                        "first": "Christopher",
                        "middle": [
                            "R"
                        ],
                        "last": "Johnson",
                        "suffix": ""
                    },
                    {
                        "first": "Miriam",
                        "middle": [
                            "R L"
                        ],
                        "last": "Petruck",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "International Journal of Lexicography",
                "volume": "16",
                "issue": "3",
                "pages": "235--250",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Charles J. Fillmore, Christopher R. Johnson, and Miriam R.L. Petruck. 2003. Background to Framenet. International Journal of Lexicography, 16(3):235- 250.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "ETS: Discriminative edit models for paraphrase scoring",
                "authors": [
                    {
                        "first": "Michael",
                        "middle": [],
                        "last": "Heilman",
                        "suffix": ""
                    },
                    {
                        "first": "Nitin",
                        "middle": [],
                        "last": "Madnani",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics",
                "volume": "1",
                "issue": "",
                "pages": "7--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Michael Heilman and Nitin Madnani. 2012. ETS: Dis- criminative edit models for paraphrase scoring. In *SEM 2012: The First Joint Conference on Lexi- cal and Computational Semantics -Volume 1: Pro- ceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation (SemEval 2012), pages 529-535, Montr\u00e9al, Canada, 7-8 June. Associa- tion for Computational Linguistics.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "c-rater: Scoring of short-answer questions. Computers and the Humanities",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Leacock",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Chodorow",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "",
                "volume": "37",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "C. Leacock and M. Chodorow. 2003. c-rater: Scoring of short-answer questions. Computers and the Humani- ties, 37.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Classification errors in a domain-independent assessment system",
                "authors": [
                    {
                        "first": "Rodney",
                        "middle": [
                            "D"
                        ],
                        "last": "Nielsen",
                        "suffix": ""
                    },
                    {
                        "first": "Wayne",
                        "middle": [],
                        "last": "Ward",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [
                            "H"
                        ],
                        "last": "Martin",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the Third Workshop on Innovative Use of NLP for Building Educational Applications",
                "volume": "",
                "issue": "",
                "pages": "10--18",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Rodney D. Nielsen, Wayne Ward, and James H. Martin. 2008. Classification errors in a domain-independent assessment system. In Proceedings of the Third Work- shop on Innovative Use of NLP for Building Educa- tional Applications, pages 10-18, Columbus, Ohio, June. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Scikit-learn: Machine learning in Python",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Pedregosa",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Varoquaux",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Gramfort",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Michel",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Thirion",
                        "suffix": ""
                    },
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Grisel",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Blondel",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Prettenhofer",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Weiss",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Dubourg",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Vanderplas",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Passos",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Cournapeau",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Brucher",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Perrot",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Duchesnay",
                        "suffix": ""
                    }
                ],
                "year": 2011,
                "venue": "Journal of Machine Learning Research",
                "volume": "12",
                "issue": "",
                "pages": "2825--2830",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Pedregosa, G. Varoquaux, A. Gramfort, V. Michel, B. Thirion, O. Grisel, M. Blondel, P. Prettenhofer, R. Weiss, V. Dubourg, J. Vanderplas, A. Passos, D. Cournapeau, M. Brucher, M. Perrot, and E. Duches- nay. 2011. Scikit-learn: Machine learning in Python. Journal of Machine Learning Research, 12:2825- 2830.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "OntoNotes: The 90% solution",
                "authors": [
                    {
                        "first": "S",
                        "middle": [
                            "S"
                        ],
                        "last": "Pradhan",
                        "suffix": ""
                    },
                    {
                        "first": "N",
                        "middle": [],
                        "last": "Xue",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "The 2009 Annual Conference of the North American Chapter of the Association for Computational Linguistics, Companion Volume: Tutorial Abstracts",
                "volume": "",
                "issue": "",
                "pages": "11--12",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. S. Pradhan and N. Xue. 2009. OntoNotes: The 90% solution. In Proceedings of Human Language Tech- nologies: The 2009 Annual Conference of the North American Chapter of the Association for Computa- tional Linguistics, Companion Volume: Tutorial Ab- stracts, pages 11-12.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "The NIST 2008 metrics for machine translation challenge -overview, methodology, metrics, and results. Machine Translation",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "A"
                        ],
                        "last": "Przybocki",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Peterson",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Bronsart",
                        "suffix": ""
                    },
                    {
                        "first": "G",
                        "middle": [
                            "A"
                        ],
                        "last": "Sanders",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "",
                "volume": "23",
                "issue": "",
                "pages": "71--103",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. A. Przybocki, K. Peterson, S. Bronsart, and G. A. Sanders. 2009. The NIST 2008 metrics for machine translation challenge -overview, methodology, met- rics, and results. Machine Translation, 23(2-3):71- 103.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "TER-Plus: Paraphrase, semantic, and alignment enhancements to translation edit rate",
                "authors": [
                    {
                        "first": "Matthew",
                        "middle": [
                            "G"
                        ],
                        "last": "Snover",
                        "suffix": ""
                    },
                    {
                        "first": "Nitin",
                        "middle": [],
                        "last": "Madnani",
                        "suffix": ""
                    },
                    {
                        "first": "Bonnie",
                        "middle": [],
                        "last": "Dorr",
                        "suffix": ""
                    },
                    {
                        "first": "Richard",
                        "middle": [],
                        "last": "Schwartz",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Machine Translation",
                "volume": "23",
                "issue": "2-3",
                "pages": "117--127",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Matthew G. Snover, Nitin Madnani, Bonnie Dorr, and Richard Schwartz. 2009. TER-Plus: Paraphrase, semantic, and alignment enhancements to translation edit rate. Machine Translation, 23(2-3):117-127, September.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "TakeLab: Systems for measuring semantic text similarity",
                "authors": [
                    {
                        "first": "Frane",
                        "middle": [],
                        "last": "\u0160ari\u0107",
                        "suffix": ""
                    },
                    {
                        "first": "Goran",
                        "middle": [],
                        "last": "Glava\u0161",
                        "suffix": ""
                    },
                    {
                        "first": "Mladen",
                        "middle": [],
                        "last": "Karan",
                        "suffix": ""
                    },
                    {
                        "first": "Jan",
                        "middle": [],
                        "last": "\u0160najder",
                        "suffix": ""
                    },
                    {
                        "first": "Bojana",
                        "middle": [],
                        "last": "Dalbelo Ba\u0161i\u0107",
                        "suffix": ""
                    }
                ],
                "year": 2012,
                "venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics",
                "volume": "1",
                "issue": "",
                "pages": "7--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Frane\u0160ari\u0107, Goran Glava\u0161, Mladen Karan, Jan\u0160najder, and Bojana Dalbelo Ba\u0161i\u0107. 2012. TakeLab: Systems for measuring semantic text similarity. In *SEM 2012: The First Joint Conference on Lexical and Computa- tional Semantics -Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Pro- ceedings of the Sixth International Workshop on Se- mantic Evaluation (SemEval 2012), pages 441-448, Montr\u00e9al, Canada, 7-8 June. Association for Compu- tational Linguistics.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "What is the Jeopardy model? a quasisynchronous grammar for QA",
                "authors": [
                    {
                        "first": "Mengqiu",
                        "middle": [],
                        "last": "Wang",
                        "suffix": ""
                    },
                    {
                        "first": "Noah",
                        "middle": [
                            "A"
                        ],
                        "last": "Smith",
                        "suffix": ""
                    },
                    {
                        "first": "Teruko",
                        "middle": [],
                        "last": "Mitamura",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)",
                "volume": "",
                "issue": "",
                "pages": "22--32",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mengqiu Wang, Noah A. Smith, and Teruko Mita- mura. 2007. What is the Jeopardy model? a quasi- synchronous grammar for QA. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natu- ral Language Processing and Computational Natural Language Learning (EMNLP-CoNLL), pages 22-32, Prague, Czech Republic, June. Association for Com- putational Linguistics.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "Stacked generalization",
                "authors": [
                    {
                        "first": "David",
                        "middle": [
                            "H"
                        ],
                        "last": "Wolpert",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "Neural Networks",
                "volume": "5",
                "issue": "",
                "pages": "241--259",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David H. Wolpert. 1992. Stacked generalization. Neural Networks, 5:241-259.",
                "links": null
            }
        },
        "ref_entries": {}
    }
}