File size: 78,558 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
{
    "paper_id": "P06-1004",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:23:54.845149Z"
    },
    "title": "Minimum Cut Model for Spoken Lecture Segmentation",
    "authors": [
        {
            "first": "Igor",
            "middle": [],
            "last": "Malioutov",
            "suffix": "",
            "affiliation": {
                "laboratory": "Artificial Intelligence Laboratory",
                "institution": "Massachusetts Institute of Technology",
                "location": {}
            },
            "email": "igorm@csail.mit.edu"
        },
        {
            "first": "Regina",
            "middle": [],
            "last": "Barzilay",
            "suffix": "",
            "affiliation": {
                "laboratory": "Artificial Intelligence Laboratory",
                "institution": "Massachusetts Institute of Technology",
                "location": {}
            },
            "email": "regina@csail.mit.edu"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We consider the task of unsupervised lecture segmentation. We formalize segmentation as a graph-partitioning task that optimizes the normalized cut criterion. Our approach moves beyond localized comparisons and takes into account long-range cohesion dependencies. Our results demonstrate that global analysis improves the segmentation accuracy and is robust in the presence of speech recognition errors.",
    "pdf_parse": {
        "paper_id": "P06-1004",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We consider the task of unsupervised lecture segmentation. We formalize segmentation as a graph-partitioning task that optimizes the normalized cut criterion. Our approach moves beyond localized comparisons and takes into account long-range cohesion dependencies. Our results demonstrate that global analysis improves the segmentation accuracy and is robust in the presence of speech recognition errors.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The development of computational models of text structure is a central concern in natural language processing. Text segmentation is an important instance of such work. The task is to partition a text into a linear sequence of topically coherent segments and thereby induce a content structure of the text. The applications of the derived representation are broad, encompassing information retrieval, question-answering and summarization.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Not surprisingly, text segmentation has been extensively investigated over the last decade. Following the first unsupervised segmentation approach by Hearst (1994) , most algorithms assume that variations in lexical distribution indicate topic changes. When documents exhibit sharp variations in lexical distribution, these algorithms are likely to detect segment boundaries accurately. For example, most algorithms achieve high performance on synthetic collections, generated by concatenation of random text blocks (Choi, 2000) . The difficulty arises, however, when transitions between topics are smooth and distributional variations are subtle. This is evident in the performance of existing unsupervised algorithms on less structured datasets, such as spoken meeting transcripts (Galley et al., 2003) . Therefore, a more refined analysis of lexical distribution is needed.",
                "cite_spans": [
                    {
                        "start": 150,
                        "end": 163,
                        "text": "Hearst (1994)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 516,
                        "end": 528,
                        "text": "(Choi, 2000)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 783,
                        "end": 804,
                        "text": "(Galley et al., 2003)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our work addresses this challenge by casting text segmentation in a graph-theoretic framework. We abstract a text into a weighted undirected graph, where the nodes of the graph correspond to sentences and edge weights represent the pairwise sentence similarity. In this framework, text segmentation corresponds to a graph partitioning that optimizes the normalized-cut criterion (Shi and Malik, 2000) . This criterion measures both the similarity within each partition and the dissimilarity across different partitions. Thus, our approach moves beyond localized comparisons and takes into account long-range changes in lexical distribution. Our key hypothesis is that global analysis yields more accurate segmentation results than local models.",
                "cite_spans": [
                    {
                        "start": 379,
                        "end": 400,
                        "text": "(Shi and Malik, 2000)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "We tested our algorithm on a corpus of spoken lectures. Segmentation in this domain is challenging in several respects. Being less structured than written text, lecture material exhibits digressions, disfluencies, and other artifacts of spontaneous communication. In addition, the output of speech recognizers is fraught with high word error rates due to specialized technical vocabulary and lack of in-domain spoken data for training. Finally, pedagogical considerations call for fluent transitions between different topics in a lecture, further complicating the segmentation task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our experimental results confirm our hypothesis: considering long-distance lexical dependencies yields substantial gains in segmentation performance. Our graph-theoretic approach compares favorably to state-of-the-art segmentation algorithms and attains results close to the range of human agreement scores. Another attractive property of the algorithm is its robustness to noise: the accuracy of our algorithm does not deteriorate significantly when applied to speech recognition output.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Most unsupervised algorithms assume that fragments of text with homogeneous lexical distribution correspond to topically coherent segments. Previous research has analyzed various facets of lexical distribution, including lexical weighting, similarity computation, and smoothing (Hearst, 1994; Utiyama and Isahara, 2001; Choi, 2000; Reynar, 1998; Kehagias et al., 2003; Ji and Zha, 2003) .",
                "cite_spans": [
                    {
                        "start": 278,
                        "end": 292,
                        "text": "(Hearst, 1994;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 293,
                        "end": 319,
                        "text": "Utiyama and Isahara, 2001;",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 320,
                        "end": 331,
                        "text": "Choi, 2000;",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 332,
                        "end": 345,
                        "text": "Reynar, 1998;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 346,
                        "end": 368,
                        "text": "Kehagias et al., 2003;",
                        "ref_id": "BIBREF10"
                    },
                    {
                        "start": 369,
                        "end": 386,
                        "text": "Ji and Zha, 2003)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "The focus of our work, however, is on an orthogonal yet fundamental aspect of this analysis -the impact of long-range cohesion dependencies on segmentation performance. In contrast to previous approaches, the homogeneity of a segment is determined not only by the similarity of its words, but also by their relation to words in other segments of the text. We show that optimizing our global objective enables us to detect subtle topical changes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Graph-Theoretic Approaches in Vision Segmentation Our work is inspired by minimum-cutbased segmentation algorithms developed for image analysis. Shi and Malik (2000) introduced the normalized-cut criterion and demonstrated its practical benefits for segmenting static images.",
                "cite_spans": [
                    {
                        "start": 145,
                        "end": 165,
                        "text": "Shi and Malik (2000)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Our method, however, is not a simple application of the existing approach to a new task. First, in order to make it work in the new linguistic framework, we had to redefine the underlying representation and introduce a variety of smoothing and lexical weighting techniques. Second, the computational techniques for finding the optimal partitioning are also quite different. Since the minimization of the normalized cut is NP-complete in the general case, researchers in vision have to approximate this computation. Fortunately, we can find an exact solution due to the linearity constraint on text segmentation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Previous Work",
                "sec_num": "2"
            },
            {
                "text": "Linguistic research has shown that word repetition in a particular section of a text is a device for creating thematic cohesion (Halliday and Hasan, 1976) , and that changes in the lexical distributions usually signal topic transitions. Figure 1 illustrates these properties in a lecture transcript from an undergraduate Physics class. We use the text Dotplotting representation by (Church, 1993) and plot the cosine similarity scores between every pair of sentences in the text. The intensity of a point (i, j) on the plot indicates the degree to which the i-th sentence in the text is similar to the j-th sentence. The true segment boundaries are denoted by vertical lines. This similarity plot reveals a block structure where true boundaries delimit blocks of text with high inter-sentential similarity. Sentences found in different blocks, on the other hand, tend to exhibit low similarity. Formalizing the Objective Whereas previous unsupervised approaches to segmentation rested on intuitive notions of similarity density, we formalize the objective of text segmentation through cuts on graphs. We aim to jointly maximize the intra-segmental similarity and minimize the similarity between different segments. In other words, we want to find the segmentation with a maximally homogeneous set of segments that are also maximally different from each other.",
                "cite_spans": [
                    {
                        "start": 128,
                        "end": 154,
                        "text": "(Halliday and Hasan, 1976)",
                        "ref_id": null
                    },
                    {
                        "start": 382,
                        "end": 396,
                        "text": "(Church, 1993)",
                        "ref_id": "BIBREF3"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 237,
                        "end": 245,
                        "text": "Figure 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "u 1 u 2 u 3 u n",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "Let G = {V, E} be an undirected, weighted graph, where V is the set of nodes corresponding to sentences in the text and E is the set of weighted edges (See Figure 2) . The edge weights, w(u, v), define a measure of similarity between pairs of nodes u and v, where higher scores indicate higher similarity. Section 4 provides more details on graph construction.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 156,
                        "end": 165,
                        "text": "Figure 2)",
                        "ref_id": "FIGREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "We consider the problem of partitioning the graph into two disjoint sets of nodes A and B. We aim to minimize the cut, which is defined to be the sum of the crossing edges between the two sets of nodes. In other words, we want to split the sentences into two maximally dissimilar classes by choosing A and B to minimize:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "cut(A, B) = u\u2208A,v\u2208B w(u, v)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "However, we need to ensure that the two partitions are not only maximally different from each other, but also that they are themselves homogeneous by accounting for intra-partition node similarity. We formulate this requirement in the framework of normalized cuts (Shi and Malik, 2000) , where the cut value is normalized by the volume of the corresponding partitions. The volume of the partition is the sum of its edges to the whole graph:",
                "cite_spans": [
                    {
                        "start": 264,
                        "end": 285,
                        "text": "(Shi and Malik, 2000)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "vol(A) = u\u2208A,v\u2208V w(u, v)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "The normalized cut criterion (N cut) is then defined as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "N cut(A, B) = cut(A, B) vol(A) + cut(A, B) vol(B)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "By minimizing this objective we simultaneously minimize the similarity across partitions and maximize the similarity within partitions. This formulation also allows us to decompose the objective into a sum of individual terms, and formulate a dynamic programming solution to the multiway cut problem.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "This criterion is naturally extended to a k-way normalized cut:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "N cut k (V ) = cut(A1, V \u2212 A1) vol(A1) + . . . + cut(A k , V \u2212 A k ) vol(A k )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "where A 1 . . . A k form a partition of the graph, and V \u2212A k is the set difference between the entire graph and partition k.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "Decoding Papadimitriou proved that the problem of minimizing normalized cuts on graphs is N P -complete (Shi and Malik, 2000) . However, in our case, the multi-way cut is constrained to preserve the linearity of the segmentation. By segmentation linearity, we mean that all of the nodes between the leftmost and the rightmost nodes of a particular partition have to belong to that partition. With this constraint, we formulate a dynamic programming algorithm for exactly finding the minimum normalized multiway cut in polynomial time:",
                "cite_spans": [
                    {
                        "start": 104,
                        "end": 125,
                        "text": "(Shi and Malik, 2000)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "C [i, k] = min j<k C [i \u2212 1, j] + cut [A j,k , V \u2212 A j,k ] vol [A j,k ] (1) B [i, k] = argmin j<k C [i \u2212 1, j] + cut [A j,k , V \u2212 A j,k ] vol [A j,k ] (2) s.t. C [0, 1] = 0, C [0, k] = \u221e, 1 < k \u2264 N (3) B [0, k] = 1, 1 \u2264 k \u2264 N (4) C [i, k]",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "is the normalized cut value of the optimal segmentation of the first k sentences into i segments. The i-th segment, A j,k , begins at node u j and ends at node u k . B [i, k] is the back-pointer table from which we recover the optimal sequence of segment boundaries. Equations 3 and 4 capture respectively the condition that the normalized cut value of the trivial segmentation of an empty text into one segment is zero and the constraint that the first segment starts with the first node.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "The time complexity of the dynamic programming algorithm is O(KN 2 ), where K is the number of partitions and N is the number of nodes in the graph or sentences in the transcript.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Minimum Cut Framework",
                "sec_num": "3"
            },
            {
                "text": "Clearly, the performance of our model depends on the underlying representation, the definition of the pairwise similarity function, and various other model parameters. In this section we provide further details on the graph construction process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "Preprocessing Before building the graph, we apply standard text preprocessing techniques to the text. We stem words with the Porter stemmer (Porter, 1980) to alleviate the sparsity of word counts through stem equivalence classes. We also remove words matching a prespecified list of stop words.",
                "cite_spans": [
                    {
                        "start": 140,
                        "end": 154,
                        "text": "(Porter, 1980)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "Graph Topology As we noted in the previous section, the normalized cut criterion considers long-term similarity relationships between nodes. This effect is achieved by constructing a fullyconnected graph. However, considering all pairwise relations in a long text may be detrimental to segmentation accuracy. Therefore, we discard edges between sentences exceeding a certain threshold distance. This reduction in the graph size also provides us with computational savings.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "Similarity Computation In computing pairwise sentence similarities, sentences are represented as vectors of word counts. Cosine similarity is commonly used in text segmentation (Hearst, 1994) . To avoid numerical precision issues when summing a series of very small scores, we compute exponentiated cosine similarity scores between pairs of sentence vectors:",
                "cite_spans": [
                    {
                        "start": 177,
                        "end": 191,
                        "text": "(Hearst, 1994)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "w(s i , s j ) = e s i \u2022s j ||s i ||\u00d7||s j ||",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "We further refine our analysis by smoothing the similarity metric. When comparing two sentences, we also take into account similarity between their immediate neighborhoods. The smoothing is achieved by adding counts of words that occur in adjoining sentences to the current sentence feature vector. These counts are weighted in accordance to their distance from the current sentence:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "s i = i+k j=i e \u2212\u03b1(j\u2212i) s j ,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "where s i are vectors of word counts, and \u03b1 is a parameter that controls the degree of smoothing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "In the formulation above we use sentences as our nodes. However, we can also represent graph nodes with non-overlapping blocks of words of fixed length. This is desirable, since the lecture transcripts lack sentence boundary markers, and short utterances can skew the cosine similarity scores. The optimal length of the block is tuned on a heldout development set.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "Lexical Weighting Previous research has shown that weighting schemes play an important role in segmentation performance (Ji and Zha, 2003; Choi et al., 2001 ). Of particular concern are words that may not be common in general English discourse but that occur throughout the text for a particular lecture or subject. For example, in a lecture about support vector machines, the occurrence of the term \"SVM\" is not going to convey a lot of information about the distribution of In order to address this issue, we introduce a variation on the tf-idf scoring scheme used in the information-retrieval literature (Salton and Buckley, 1988) . A transcript is split uniformly into N chunks; each chunk serves as the equivalent of documents in the tf-idf computation. The weights are computed separately for each transcript, since topic and word distributions vary across lectures.",
                "cite_spans": [
                    {
                        "start": 120,
                        "end": 138,
                        "text": "(Ji and Zha, 2003;",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 139,
                        "end": 156,
                        "text": "Choi et al., 2001",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 607,
                        "end": 633,
                        "text": "(Salton and Buckley, 1988)",
                        "ref_id": "BIBREF15"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Graph",
                "sec_num": "4"
            },
            {
                "text": "In this section we present the different corpora used to evaluate our model and provide a brief overview of the evaluation metrics. Next, we describe our human segmentation study on the corpus of spoken lecture data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Set-Up",
                "sec_num": "5"
            },
            {
                "text": "A heldout development set of three lectures isused for estimating the optimal word block length for representing nodes, the threshold distances for discarding node edges, the number of uniform chunks for estimating tf-idf lexical weights, the alpha parameter for smoothing, and the length of the smoothing window. We use a simple greedy search procedure for optimizing the parameters.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Parameter Estimation",
                "sec_num": "5.1"
            },
            {
                "text": "We evaluate our segmentation algorithm on three sets of data. Two of the datasets we use are new segmentation collections that we have compiled for this study, 1 and the remaining set includes a standard collection previously used for evaluation of segmentation algorithms. Various corpus statistics for the new datasets are presented in Table 1 . Below we briefly describe each corpus.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 338,
                        "end": 345,
                        "text": "Table 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "Physics Lectures Our first corpus consists of spoken lecture transcripts from an undergraduate Physics class. In contrast to other segmentation datasets, our corpus contains much longer texts. A typical lecture of 90 minutes has 500 to 700 sentences with 8500 words, which corresponds to about 15 pages of raw text. We have access both to manual transcriptions of these lectures and also output from an automatic speech recognition system. The word error rate for the latter is 19.4%, 2 which is representative of state-of-the-art performance on lecture material (Leeuwis et al., 2003) .",
                "cite_spans": [
                    {
                        "start": 563,
                        "end": 585,
                        "text": "(Leeuwis et al., 2003)",
                        "ref_id": "BIBREF11"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "The Physics lecture transcript segmentations were produced by the teaching staff of the introductory Physics course at the Massachusetts Institute of Technology. Their objective was to facilitate access to lecture recordings available on the class website. This segmentation conveys the high-level topical structure of the lectures. On average, a lecture was annotated with six segments, and a typical segment corresponds to two pages of a transcript.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "Artificial Intelligence Lectures Our second lecture corpus differs in subject matter, lecturing style, and segmentation granularity. The graduate Artificial Intelligence class has, on average, twelve segments per lecture, and a typical segment is about half of a page. One segment roughly corresponds to the content of a slide. This time the segmentation was obtained from the lecturer herself. The lecturer went through the transcripts of lecture recordings and segmented the lectures with the objective of making the segments correspond to presentation slides for the lectures.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "Due to the low recording quality, we were unable to obtain the ASR transcripts for this class. Therefore, we only use manual transcriptions of these lectures.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "Synthetic Corpus Also as part of our analysis, we used the synthetic corpus created by Choi (2000) which is commonly used in the evaluation of segmentation algorithms. This corpus consists of a set of concatenated segments randomly sampled from the Brown corpus. The length of the segments in this corpus ranges from three to eleven sentences. It is important to note that the lexical transitions in these concatenated texts are very sharp, since the segments come from texts written in widely varying language styles on completely different topics.",
                "cite_spans": [
                    {
                        "start": 87,
                        "end": 98,
                        "text": "Choi (2000)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Corpora",
                "sec_num": "5.2"
            },
            {
                "text": "We use the P k and WindowDiff measures to evaluate our system (Beeferman et al., 1999; Pevzner and Hearst, 2002) . The P k measure estimates the probability that a randomly chosen pair of words within a window of length k words is inconsistently classified. The WindowDiff metric is a variant of the P k measure, which penalizes false positives on an equal basis with near misses.",
                "cite_spans": [
                    {
                        "start": 62,
                        "end": 86,
                        "text": "(Beeferman et al., 1999;",
                        "ref_id": "BIBREF0"
                    },
                    {
                        "start": 87,
                        "end": 112,
                        "text": "Pevzner and Hearst, 2002)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metric",
                "sec_num": "5.3"
            },
            {
                "text": "Both of these metrics are defined with respect to the average segment length of texts and exhibit high variability on real data. We follow Choi (2000) and compute the mean segment length used in determining the parameter k on each reference text separately.",
                "cite_spans": [
                    {
                        "start": 139,
                        "end": 150,
                        "text": "Choi (2000)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metric",
                "sec_num": "5.3"
            },
            {
                "text": "We also plot the Receiver Operating Characteristic (ROC) curve to gauge performance at a finer level of discrimination (Swets, 1988) . The ROC plot is the plot of the true positive rate against the false positive rate for various settings of a decision criterion. In our case, the true positive rate is the fraction of boundaries correctly classified, and the false positive rate is the fraction of non-boundary positions incorrectly classified as boundaries. In computing the true and false positive rates, we vary the threshold distance to the true boundary within which a hypothesized boundary is considered correct. Larger areas under the ROC curve of a classifier indicate better discriminative performance.",
                "cite_spans": [
                    {
                        "start": 119,
                        "end": 132,
                        "text": "(Swets, 1988)",
                        "ref_id": "BIBREF17"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Evaluation Metric",
                "sec_num": "5.3"
            },
            {
                "text": "Spoken lectures are very different in style from other corpora used in human segmentation studies (Hearst, 1994; Galley et al., 2003) . We are interested in analyzing human performance on a corpus of lecture transcripts with much longer texts and a less clear-cut concept of a sub-topic. We define a segment to be a sub-topic that signals a prominent shift in subject matter. Disregarding this sub-topic change would impair the high-level understanding of the structure and the content of the lecture.",
                "cite_spans": [
                    {
                        "start": 98,
                        "end": 112,
                        "text": "(Hearst, 1994;",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 113,
                        "end": 133,
                        "text": "Galley et al., 2003)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Human Segmentation Study",
                "sec_num": "5.4"
            },
            {
                "text": "As part of our human segmentation analysis, we asked three annotators to segment the Physics lecture corpus. These annotators had taken the class in the past and were familiar with the subject matter under consideration. We wrote a detailed instruction manual for the task, with annotation guidelines for the most part following the model used by Gruenstein et al. (2005) . The annotators were instructed to segment at a level of granularity O A B C MEAN SEG. COUNT 6.6 8.9 18.4 13.8 MEAN SEG. LENGTH 69.4 51.5 24.9 33.2 SEG. LENGTH DEV. 39.6 37.4 34.5 39.4 that would identify most of the prominent topical transitions necessary for a summary of the lecture. The annotators used the NOMOS annotation software toolkit, developed for meeting segmentation (Gruenstein et al., 2005) . They were provided with recorded audio of the lectures and the corresponding text transcriptions. We intentionally did not provide the subjects with the target number of boundaries, since we wanted to see if the annotators would converge on a common segmentation granularity. Table 2 presents the annotator segmentation statistics. We see two classes of segmentation granularities. The original reference (O) and annotator A segmented at a coarse level with an average of 6.6 and 8.9 segments per lecture, respectively. Annotators B and C operated at much finer levels of discrimination with 18.4 and 13.8 segments per lecture on average. We conclude that multiple levels of granularity are acceptable in spoken lecture segmentation. This is expected given the length of the lectures and varying human judgments in selecting relevant topical content.",
                "cite_spans": [
                    {
                        "start": 347,
                        "end": 371,
                        "text": "Gruenstein et al. (2005)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 754,
                        "end": 779,
                        "text": "(Gruenstein et al., 2005)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 1058,
                        "end": 1065,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Human Segmentation Study",
                "sec_num": "5.4"
            },
            {
                "text": "Following previous studies, we quantify the level of annotator agreement with the P k measure (Gruenstein et al., 2005 ). 3 Table 3 shows the annotator agreement scores between different pairs of annotators. P k measures ranged from 0.24 and 0.42. We observe greater consistency at similar levels of granularity, and less so across the two Table 4 : Edges between nodes separated beyond a certain threshold distance are removed.",
                "cite_spans": [
                    {
                        "start": 94,
                        "end": 118,
                        "text": "(Gruenstein et al., 2005",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 124,
                        "end": 131,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    },
                    {
                        "start": 340,
                        "end": 347,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Human Segmentation Study",
                "sec_num": "5.4"
            },
            {
                "text": "classes. Note that annotator A operated at a level of granularity consistent with the original reference segmentation. Hence, the 0.24 P k measure score serves as the benchmark with which we can compare the results attained by segmentation algorithms on the Physics lecture data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Human Segmentation Study",
                "sec_num": "5.4"
            },
            {
                "text": "As an additional point of reference we note that the uniform and random baseline segmentations attain 0.469 and 0.493 P k measure, respectively, on the Physics lecture set. ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Human Segmentation Study",
                "sec_num": "5.4"
            },
            {
                "text": "We first determine the impact of long-range pairwise similarity dependencies on segmentation performance. Our Table 5 : Performance analysis of different algorithms using the P k and WindowDiff measures, with three lectures heldout for development.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 110,
                        "end": 117,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "key hypothesis is that considering long-distance lexical relations contributes to the effectiveness of the algorithm. To test this hypothesis, we discard edges between nodes that are more than a certain number of sentences apart. We test the system on a range of data sets, including the Physics and AI lectures and the synthetic corpus created by Choi (2000) . We also include segmentation results on Physics ASR transcripts.",
                "cite_spans": [
                    {
                        "start": 348,
                        "end": 359,
                        "text": "Choi (2000)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "The results in Table 4 confirm our hypothesistaking into account non-local lexical dependencies helps across different domains. On manually transcribed Physics lecture data, for example, the algorithm yields 0.394 P k measure when taking into account edges separated by up to ten sentences. When dependencies up to a hundred sentences are considered, the algorithm yields a 25% reduction in P k measure. Figure 3 shows the ROC plot for the segmentation of the Physics lecture data with different cutoff parameters, again demonstrating clear gains attained by employing longrange dependencies. As Table 4 shows, the improvement is consistent across all lecture datasets. We note, however, that after some point increasing the threshold degrades performance, because it introduces too many spurious dependencies (see the last column of Table 4 ). The speaker will occasionally return to a topic described at the beginning of the lecture, and this will bias the algorithm to put the segment boundary closer to the end of the lecture.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 15,
                        "end": 22,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 404,
                        "end": 412,
                        "text": "Figure 3",
                        "ref_id": "FIGREF4"
                    },
                    {
                        "start": 596,
                        "end": 603,
                        "text": "Table 4",
                        "ref_id": null
                    },
                    {
                        "start": 834,
                        "end": 841,
                        "text": "Table 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "Long-range dependencies do not improve the performance on the synthetic dataset. This is expected since the segments in the synthetic dataset are randomly selected from widely-varying documents in the Brown corpus, even spanning different genres of written language. So, effectively, there are no genuine long-range dependencies that can be exploited by the algorithm.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "Comparison with local dependency models We compare our system with the state-of-the-art similarity-based segmentation system developed by Choi (2000) . We use the publicly available implementation of the system and optimize the system on a range of mask-sizes and different parameter settings described in (Choi, 2000) on a heldout development set of three lectures. To control for segmentation granularity, we specify the number of segments in the reference (\"O\") segmentation for both our system and the baseline. Table 5 shows that the Minimum Cut algorithm consistently outperforms the similarity-based baseline on all the lecture datasets. We attribute this gain to the presence of more attenuated topic transitions in spoken language. Since spoken language is more spontaneous and less structured than written language, the speaker needs to keep the listener abreast of the changes in topic content by introducing subtle cues and references to prior topics in the course of topical transitions. Non-local dependencies help to elucidate shifts in focus, because the strength of a particular transition is measured with respect to other local and long-distance contextual discourse relationships.",
                "cite_spans": [
                    {
                        "start": 138,
                        "end": 149,
                        "text": "Choi (2000)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 306,
                        "end": 318,
                        "text": "(Choi, 2000)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 516,
                        "end": 523,
                        "text": "Table 5",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "Our system does not outperform Choi's algorithm on the synthetic data. This again can be attributed to the discrepancy in distributional properties of the synthetic corpus which lacks coherence in its thematic shifts and the lecture corpus of spontaneous speech with smooth distributional variations. We also note that we did not try to adjust our model to optimize its performance on the synthetic data. The smoothing method developed for lecture segmentation may not be appropriate for short segments ranging from three to eleven sentences that constitute the synthetic set.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "We also compared our method with another state-of-the-art algorithm which does not explicitly rely on pairwise similarity analysis. This algorithm (Utiyama and Isahara, 2001 ) (UI) computes the optimal segmentation by estimating changes in the language model predictions over different partitions. We used the publicly available implemen-tation of the system that does not require parameter tuning on a heldout development set.",
                "cite_spans": [
                    {
                        "start": 147,
                        "end": 173,
                        "text": "(Utiyama and Isahara, 2001",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "Again, our method achieves favorable performance on a range of lecture data sets (See Table 5), and both algorithms attain results close to the range of human agreement scores. The attractive feature of our algorithm, however, is robustness to recognition errors -testing it on the ASR transcripts caused only 7.8% relative increase in P k measure (from 0.298 to 0.322), compared to a 13.5% relative increase for the UI system. We attribute this feature to the fact that the model is less dependent on individual recognition errors, which have a detrimental effect on the local segment language modeling probability estimates for the UI system. The block-level similarity function is not as sensitive to individual word errors, because the partition volume normalization factor dampens their overall effect on the derived models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Benefits of global analysis",
                "sec_num": null
            },
            {
                "text": "In this paper we studied the impact of long-range dependencies on the accuracy of text segmentation. We modeled text segmentation as a graphpartitioning task aiming to simultaneously optimize the total similarity within each segment and dissimilarity across various segments. We showed that global analysis of lexical distribution improves the segmentation accuracy and is robust in the presence of recognition errors. Combining global analysis with advanced methods for smoothing (Ji and Zha, 2003) and weighting could further boost the segmentation performance.",
                "cite_spans": [
                    {
                        "start": 481,
                        "end": 499,
                        "text": "(Ji and Zha, 2003)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "7"
            },
            {
                "text": "Our current implementation does not automatically determine the granularity of a resulting segmentation. This issue has been explored in the past (Ji and Zha, 2003; Utiyama and Isahara, 2001) , and we will explore the existing strategies in our framework. We believe that the algorithm has to produce segmentations for various levels of granularity, depending on the needs of the application that employs it.",
                "cite_spans": [
                    {
                        "start": 146,
                        "end": 164,
                        "text": "(Ji and Zha, 2003;",
                        "ref_id": "BIBREF9"
                    },
                    {
                        "start": 165,
                        "end": 191,
                        "text": "Utiyama and Isahara, 2001)",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "7"
            },
            {
                "text": "Our ultimate goal is to automatically generate tables of content for lectures. We plan to investigate strategies for generating titles that will succinctly describe the content of each segment. We will explore how the interaction between the generation and segmentation components can improve the performance of such a system as a whole.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusions",
                "sec_num": "7"
            },
            {
                "text": "Our materials are publicly available at http://www.csail.mit.edu/~igorm/acl06.html",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "A speaker-dependent model of the lecturer was trained on 38 hours of lectures from other courses using the SUMMIT segment-based Speech Recognizer (Glass, 2003).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "Kappa measure would not be the appropriate measure in this case, because it is not sensitive to near misses, and we cannot make the required independence assumption on the placement of boundaries.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The authors acknowledge the support of the National Science Foundation (CAREER grant IIS-0448168, grant IIS-0415865, and the NSF Graduate Fellowship). Any opinions, findings, conclusions or recommendations expressed in this publication are those of the author(s) and do not necessarily reflect the views of the National Science Foundation. We would like to thank Masao Utiyama for providing us with an implementation of his segmentation system and Alex Gruenstein for assisting us with the NOMOS toolkit. We are grateful to David Karger for an illuminating discussion on the Minimum Cut algorithm. We also would like to acknowledge the MIT NLP and Speech Groups, the three annotators, and the three anonymous reviewers for valuable comments, suggestions, and help.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": "8"
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Statistical models for text segmentation",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Beeferman",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Berger",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "D"
                        ],
                        "last": "Lafferty",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "Machine Learning",
                "volume": "34",
                "issue": "",
                "pages": "177--210",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Beeferman, A. Berger, J. D. Lafferty. 1999. Statistical models for text segmentation. Machine Learning, 34(1- 3):177-210.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Latent semantic analysis for text segmentation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [],
                        "last": "Choi",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Wiemer-Hastings",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Moore",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of EMNLP",
                "volume": "",
                "issue": "",
                "pages": "109--117",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Choi, P. Wiemer-Hastings, J. Moore. 2001. Latent se- mantic analysis for text segmentation. In Proceedings of EMNLP, 109-117.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Advances in domain independent linear text segmentation",
                "authors": [
                    {
                        "first": "F",
                        "middle": [
                            "Y Y"
                        ],
                        "last": "Choi",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the NAACL",
                "volume": "",
                "issue": "",
                "pages": "26--33",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "F. Y. Y. Choi. 2000. Advances in domain independent linear text segmentation. In Proceedings of the NAACL, 26-33.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Char align: A program for aligning parallel texts at the character level",
                "authors": [
                    {
                        "first": "K",
                        "middle": [
                            "W"
                        ],
                        "last": "Church",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the ACL",
                "volume": "",
                "issue": "",
                "pages": "1--8",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "K. W. Church. 1993. Char align: A program for aligning parallel texts at the character level. In Proceedings of the ACL, 1-8.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Discourse segmentation of multi-party conversation",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Galley",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Mckeown",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Fosler-Lussier",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Jing",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the ACL",
                "volume": "",
                "issue": "",
                "pages": "562--569",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Galley, K. McKeown, E. Fosler-Lussier, H. Jing. 2003. Discourse segmentation of multi-party conversation. In Proceedings of the ACL, 562-569.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "A probabilistic framework for segmentbased speech recognition",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "R"
                        ],
                        "last": "Glass",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Computer Speech and Language",
                "volume": "17",
                "issue": "2-3",
                "pages": "137--152",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. R. Glass. 2003. A probabilistic framework for segment- based speech recognition. Computer Speech and Lan- guage, 17(2-3):137-152.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Meeting structure annotation: Data and tools",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Gruenstein",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Niekrasz",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Purver",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Proceedings of the SIGdial Workshop on Discourse and Dialogue",
                "volume": "",
                "issue": "",
                "pages": "117--127",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "A. Gruenstein, J. Niekrasz, M. Purver. 2005. Meeting struc- ture annotation: Data and tools. In Proceedings of the SIGdial Workshop on Discourse and Dialogue, 117-127.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Multi-paragraph segmentation of expository text",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Hearst",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the ACL",
                "volume": "",
                "issue": "",
                "pages": "9--16",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Hearst. 1994. Multi-paragraph segmentation of exposi- tory text. In Proceedings of the ACL, 9-16.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Domain-independent text segmentation using anisotropic diffusion and dynamic programming",
                "authors": [
                    {
                        "first": "X",
                        "middle": [],
                        "last": "Ji",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Zha",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of SIGIR",
                "volume": "",
                "issue": "",
                "pages": "322--329",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "X. Ji, H. Zha. 2003. Domain-independent text segmentation using anisotropic diffusion and dynamic programming. In Proceedings of SIGIR, 322-329.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Linear text segmentation using a dynamic programming algorithm",
                "authors": [
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Kehagias",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Fragkou",
                        "suffix": ""
                    },
                    {
                        "first": "V",
                        "middle": [],
                        "last": "Petridis",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of the EACL",
                "volume": "",
                "issue": "",
                "pages": "171--178",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "A. Kehagias, P. Fragkou, V. Petridis. 2003. Linear text seg- mentation using a dynamic programming algorithm. In Proceedings of the EACL, 171-178.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "Language modeling and transcription of the ted corpus lectures",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Leeuwis",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Federico",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Cettolo",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Proceedings of ICASSP",
                "volume": "",
                "issue": "",
                "pages": "232--235",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. Leeuwis, M. Federico, M. Cettolo. 2003. Language mod- eling and transcription of the ted corpus lectures. In Pro- ceedings of ICASSP, 232-235.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "A critique and improvement of an evaluation metric for text segmentation",
                "authors": [
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Pevzner",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Hearst",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Computational Linguistics",
                "volume": "28",
                "issue": "1",
                "pages": "19--36",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "L. Pevzner, M. Hearst. 2002. A critique and improvement of an evaluation metric for text segmentation. Computa- tional Linguistics, 28(1):pp. 19-36.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "An algorithm for suffix stripping",
                "authors": [
                    {
                        "first": "M",
                        "middle": [
                            "F"
                        ],
                        "last": "Porter",
                        "suffix": ""
                    }
                ],
                "year": 1980,
                "venue": "Program",
                "volume": "14",
                "issue": "3",
                "pages": "130--137",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. F. Porter. 1980. An algorithm for suffix stripping. Pro- gram, 14(3):130-137.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Topic segmentation: Algorithms and applications",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Reynar",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Reynar. 1998. Topic segmentation: Algorithms and appli- cations. Ph.D. thesis, University of Pennsylvania.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "Term weighting approaches in automatic text retrieval",
                "authors": [
                    {
                        "first": "G",
                        "middle": [],
                        "last": "Salton",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Buckley",
                        "suffix": ""
                    }
                ],
                "year": 1988,
                "venue": "Information Processing and Management",
                "volume": "24",
                "issue": "5",
                "pages": "513--523",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "G. Salton, C. Buckley. 1988. Term weighting approaches in automatic text retrieval. Information Processing and Management, 24(5):513-523.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Normalized cuts and image segmentation",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Shi",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Malik",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "IEEE Transactions on Pattern Analysis and Machine Intelligence",
                "volume": "22",
                "issue": "8",
                "pages": "888--905",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Shi, J. Malik. 2000. Normalized cuts and image segmenta- tion. IEEE Transactions on Pattern Analysis and Machine Intelligence, 22(8):888-905.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Measuring the accuracy of diagnostic systems",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Swets",
                        "suffix": ""
                    }
                ],
                "year": 1988,
                "venue": "Science",
                "volume": "240",
                "issue": "4857",
                "pages": "1285--1293",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Swets. 1988. Measuring the accuracy of diagnostic sys- tems. Science, 240(4857):1285-1293.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "A statistical model for domain-independent text segmentation",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Utiyama",
                        "suffix": ""
                    },
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Isahara",
                        "suffix": ""
                    }
                ],
                "year": 2001,
                "venue": "Proceedings of the ACL",
                "volume": "",
                "issue": "",
                "pages": "499--506",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Utiyama, H. Isahara. 2001. A statistical model for domain-independent text segmentation. In Proceedings of the ACL, 499-506.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "text": "Sentence similarity plot for a Physics lecture, with vertical lines indicating true segment boundaries.",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF1": {
                "uris": null,
                "text": "Graph-based Representation of Text",
                "num": null,
                "type_str": "figure"
            },
            "FIGREF4": {
                "uris": null,
                "text": "ROC plot for the Minimum Cut Segmenter on thirty Physics Lectures, with edge cutoffs set at five and hundred sentences.",
                "num": null,
                "type_str": "figure"
            },
            "TABREF1": {
                "text": "Annotator Segmentation Statistics for the first ten Physics lectures.",
                "html": null,
                "content": "<table><tr><td colspan=\"2\">REF/HYP O</td><td>A</td><td>B</td><td>C</td></tr><tr><td>O</td><td>0</td><td colspan=\"3\">0.243 0.418 0.312</td></tr><tr><td>A</td><td colspan=\"2\">0.219 0</td><td colspan=\"2\">0.400 0.355</td></tr><tr><td>B</td><td colspan=\"3\">0.314 0.337 0</td><td>0.332</td></tr><tr><td>C</td><td colspan=\"4\">0.260 0.296 0.370 0</td></tr></table>",
                "num": null,
                "type_str": "table"
            },
            "TABREF2": {
                "text": "P k annotation agreement between different pairs of annotators.",
                "html": null,
                "content": "<table/>",
                "num": null,
                "type_str": "table"
            }
        }
    }
}