File size: 69,068 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
{
    "paper_id": "P96-1028",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:02:44.432029Z"
    },
    "title": "Evaluating the Portability of Revision Rules for Incremental Summary Generation",
    "authors": [
        {
            "first": "Jacques",
            "middle": [],
            "last": "Robin",
            "suffix": "",
            "affiliation": {},
            "email": "jr@di.ufpe.br"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "This paper presents a quantitative evaluation of the portability to the stock market domain of the revision rule hierarchy used by the system STREAK to incrementally generate newswire sports summaries. The evaluation consists of searching a test corpus of stock market reports for sentence pairs whose (semantic and syntactic) structures respectively match the triggering condition and application result of each revision rule. The results show that at least 59% of all rule classes are fully portable, with at least another 7% partially portable.",
    "pdf_parse": {
        "paper_id": "P96-1028",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "This paper presents a quantitative evaluation of the portability to the stock market domain of the revision rule hierarchy used by the system STREAK to incrementally generate newswire sports summaries. The evaluation consists of searching a test corpus of stock market reports for sentence pairs whose (semantic and syntactic) structures respectively match the triggering condition and application result of each revision rule. The results show that at least 59% of all rule classes are fully portable, with at least another 7% partially portable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The project STREAK 1 focuses on the specific issues involved in generating short, newswire style, natural language texts that summarize vast amount of input tabular data in their historical context. A series of previous publications presented complementary aspects of this project: motivating corpus analysis in (Robin and McKeown, 1993) , new revision-based text generation model in (Robin, 1993) , system implementation and rule base in (Robin, 1994a) and empirical evaluation of the robustness and scalability of this new model as compared to the traditional single pass pipeline model in (Robin and McKeown, 1995) . The present paper completes this series by describing a second, empirical, corpus-based evaluation, this time quantifying the portability to another domain (the stock market) of the revision rule hierarchy acquired in the sports domain and implemented in STREAK. The goal of this paper is twofold:",
                "cite_spans": [
                    {
                        "start": 312,
                        "end": 337,
                        "text": "(Robin and McKeown, 1993)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 384,
                        "end": 397,
                        "text": "(Robin, 1993)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 439,
                        "end": 453,
                        "text": "(Robin, 1994a)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 592,
                        "end": 617,
                        "text": "(Robin and McKeown, 1995)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "(1) assessing the generality of this particular rule hierarchy and (2) providing a general, semi-automatic 1 Surface Text Reviser Expressing Additional Knowledge. methodology for evaluating the portability of semantic and syntactic knowledge structures used for natural language generation. The results reveal that at least 59% of the revision rule hierarchy abstracted from the sports domain could also be used to incrementally generate the complex sentences observed in a corpus of stock market reports.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "I start by providing the context of the evaluation with a brief overview of STREAK's revision-based generation model, followed by some details about the empirical acquisition of its revision rules from corpus data. I then present the methodology of this evaluation, followed by a discussion of its quantitative results. Finally, I compare this evaluation with other empirical evaluations in text generation and conclude by discussing future directions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "An overview of STREAK",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "The project STREAK was initially motivated by analyzing a corpus of newswire summaries written by professional sportswriters 2. This analysis revealed four characteristics of summaries that challenge the capabilities of previous text generators: concise linguistic forms, complex sentences, optional and background facts opportunistically slipped as modifiers of obligatory facts and high paraphrasing power. By greatly increasing the number of content planning and linguistic realization options that the generator must consider, as well as the mutual constraints among them, these characteristics make generating summaries in a single pass impractical. The example run given in Fig. 1 illustrates how STREAK overcomes these difficulties. It first generates a simple draft sentence that contains only the obligatory facts to include in any game report (location, date, game result and key player statistic). It then applies a series of revision rules 3, each one 2This 800,000 word corpus covers a whole NBA (National Basketball Association) season.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 680,
                        "end": 686,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "3In Fig. 1 , the nile used is indicated above each re- \"Dallas, TX -Charles Barkley tied a season high with 42 points and Danny Ainge came off the bench to add 21 Sunday as the Phoenix Suns handed the Dallas Mavericks their league worst 13th straight home defeat 123-97.\" opportunistically adding a new fact 4 that either:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 4,
                        "end": 10,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "\u2022 Complements an Mready included fact (e.g., revision of sentence 2 into 3).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "\u2022 Justifies its relevance by providing its historical background (e.g., revision of sentence 1 into 2).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "Some of these revisions are non-monotonic,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "rewording 5 a draft fact to more concisely accommodate the additional fact (e.g., revision of sentence 1 into 2). Popping additional facts from a priority stack, STREAK stops revising when the summary vised sentence. 4Highlighted in bold in Fig. 1 . 5In Fig. 1 , words that get deleted are italicized and words that get modified are underlined.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 241,
                        "end": 247,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    },
                    {
                        "start": 254,
                        "end": 260,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "Charles Barldey scored 42 points. Those 42 points equal his best scoring performance of the season. Danny Ainge is a teammate of Barkley. They play for the Phoenix Suns. Ainge is a reserve player. Yet he scored 21 points. The high scoring performances by Barkley and Ainge helped the Suns defeat the Dallas Mavericks. The Mavericks played on their homecourt in Texas. They had already lost their 12 previous games there. No other team in the league has lost so many games in a row at home. The final score was 123-97. The game was played Sunday. While STREAK generates only single sentences, those complex sentences convey as much information as whole paragraphs made of simple sentences, only far more fluently and concisely. This is illustrated by the 12 sentence paragraph 6 of Fig. 2 , which paraphrases sentence 6 of Fig. 1 . Because they express facts essentially independently of one another, such multi-sentence paragraphs are much easier to generate than the complex single sentences generated by STREAK.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 781,
                        "end": 787,
                        "text": "Fig. 2",
                        "ref_id": "FIGREF1"
                    },
                    {
                        "start": 822,
                        "end": 828,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "2",
                "sec_num": null
            },
            {
                "text": "The rules driving the revision process in STREAK were acquired by reverse engineering 7 about 300 corpus sentences. These sentences were initially classified in terms of:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "\u2022 The combination of domain concepts they expressed.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "\u2022 The thematic role and top-level syntactic category used for each of these concepts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "6This paragraph was not generated by STREAK, it is shown here only for contrastive purposes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "v i.e., analyzing how they could be incrementally generated through gradual revisions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "The resulting classes, called realization patterns, abstract the mapping from semantic to syntactic structure by factoring out lexical material and syntactic details. Two examples of realization patterns are given in Fig. 3 . Realization patterns were then grouped into surface decrement pairs consisting of:",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 217,
                        "end": 223,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "\u2022 A more complex pattern (called the target pattern).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "\u2022 A simpler pattern (called the source pattern) that is structurally the closest to the target pattern among patterns with one less concept s .",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "The structural transformations from source to target pattern in each surface decrement pair were then hierarchically classified, resulting in the revision rule hierarchy shown in Fig. 4 -10. For example, the surface decrement pair < R~, R 1 >, shown in Fig. 3 , is one of the pairs from which the revision rule Adjunctization of Range into Instrument, shown in Fig. 10 was abstracted.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 179,
                        "end": 185,
                        "text": "Fig. 4",
                        "ref_id": null
                    },
                    {
                        "start": 253,
                        "end": 259,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    },
                    {
                        "start": 361,
                        "end": 368,
                        "text": "Fig. 10",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "It involves displacing the Range argument of the source clause as an Instrument adjunct to accommodate a new verb and its argument. This revision rule is a sibling of the rule Adjunctization of Created into Instrument used to revise sentence i into 2 in STREAK'S run shown in Fig. 1 (where the Created argument role \"42 points\" of the verb \"to score\" in I becomes an Instrument adjunct in 2).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 276,
                        "end": 282,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "The bottom level of the revision rule hierarchy specifies the side revisions that are orthogonal and sometimes accompany the restructuring revisions discussed up to this point. Side revisions do not make the draft more informative, but instead improve its style, conciseness and unambiguity. For example, when STREAK revises sentence (3) into (4) in the example run of Fig. 1 , the Agent of the absorbed clause \"Danny Ainge added 21 points\" becomes controlled by the new embedding clause \"Danny Ainge came off the bench\" to avoid the verbose form: ? \"Danny Ainge came off the bench for Danny Ainge to add 21 points\".",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 369,
                        "end": 375,
                        "text": "Fig. 1",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "Acquiring revision rules from corpus data",
                "sec_num": "3"
            },
            {
                "text": "Evaluation methodology",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "In the spectrum of possible evaluations, the evaluation presented in this paper is characterized as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "\u2022 Its object is the revision rule hierarchy acquired from the sports summary corpus. It thus does not directly evaluate the output of STREAK, but rather the special knowledge structures required by its underlying revision-based model. s i.e., the source pattern expresses the same concept combination than the target pattern minus one concept.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "The particular property of this revision rule hierarchy that is evaluated is cross-domain portability: how much of it could be re-used to generate summaries in another domain, namely the stock market?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "The basis for this evaluation is corpus data 9. The original sports summary corpus from which the revision rules were acquired is used as the 'training' (or acquisition) corpus and a corpus of stock market reports taken from several newswires is used as the 'test' corpus. This test corpus comprises over 18,000 sentences.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "The evaluation procedure is quantitative, measuring percentages of revision rules whose target and source realization patterns are observable in the test corpus. It is also semi-automated through the use of the corpus search tool CREP (Duford, 1993 ) (as explained below).",
                "cite_spans": [
                    {
                        "start": 235,
                        "end": 248,
                        "text": "(Duford, 1993",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Basic principle As explained in section 3, a revision rule is associated with a list of surface decrement pairs, each one consisting of:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "A source pattern whose content and linguistic form match the triggering conditions of the rule (e.g., R~ in Fig. 3 for the rule Adjunctization of Range into Instrument).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 108,
                        "end": 114,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "A target pattern whose content and linguistic form can be derived from the source pattern by applying the rule (e.g., R 2 in Fig. 3 for the rule Adjunctization of Range into Instrument).",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 125,
                        "end": 131,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "This list of decrement pairs can thus be used as the signature of the revision rule to detect its usage in the test corpus. The needed evidence is the simultaneous presence of two test corpus sentences 1\u00b0 , each one respectively matching the source and target patterns of at least one element in this list. Requiring occurrence of the source pattern in the test corpus is necessary for the computation of conservative portability estimates: while it may seem that one target pattern alone is enough evidence, without the presence of the corresponding source pattern, one cannot rule out the possibility that, in the test domain, this target pattern is either a basic pattern or derived from another source pattern using another revision rule.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "9Only the corpus analysis was performed for both domains. The implementation was not actually ported to the stock market domain.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "1\u00b0In general, not from the same report.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Realization pattern R~:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "\u2022 Expresses the concept pair: <game-result(winner,loser,score), streak(winner,aspect,result-type,lengt h) >.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "\u2022 Partially automating the evaluation The software tool CREP 11 was developed to partially automate detection of realization patterns in a text corpus. The basic idea behind CREP is to approximate a realization pattern by a regular expression whose terminals are words or parts-of-speech tags (POStags). CREP will then automatically retrieve the corpus sentences matching those expressions. For example, the CREP expression C1 below approximates the realization pattern R1 shown in Fig. 3 :",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 482,
                        "end": 488,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "(C1): TEAM 0= (claimed|recorded)@VBD 1-SCORE 0= (victory|triumph)@NN 0= (over|against)@IN 0= TEAM",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "In the expression above, 'VBD', 'NN' and 'IN' are the POS-tags for past verb, singular noun and preposition (respectively), and the sub-expressions 'TEAM' and 'SCORE' (whose recursive definitions are not shown here) match the team names and possible final scores (respectively) in the NBA. The CREP operators 'N=' and 'N-' (N being an arbitrary integer) respectively specify exact and minimal distance of N words, and '|' encodes disjunction.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "11 CREP was implemented (on top of FLEX, GNU's version of LEX) and to a large extent also designed by Duford. It uses Ken Church's POS tagger.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Because a realization pattern abstracts away from lexical items to capture the mapping from concepts to syntactic structure, approximating such a pattern by a regular expression of words and POS-tags involves encoding each concept of the pattern by the disjunction of its alternative lexicalizations. In a given domain, there are therefore two sources of inaccuracy for such an approximation:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "\u2022 Lexical ambiguity resulting in false positives by over-generalization.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "\u2022 Incomplete vocabulary resulting in false negatives by over-specialization 12.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Lexical ambiguities can be alleviated by writing more context-sensitive expressions. The vocabulary can be acquired through additional exploratory CREP runs with expressions containing wild-cards for some concept slots. Although automated corpus search using CREP expressions considerably speeds up corpus analysis, manual intervention remains 12This is the case for example of C1 above, which is a simplification of the actual expression that was used to search occurrences of R1 in the test corpus (e.g., C1 is missing \"win\" and \"rout\" as alternatives for \"victory\"). necessary to filter out incorrect matches resulting from imperfect approximations.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Cross-domain discrepancies Basic similarities between the finance and sports domains form the basis for the portability of the revision rules. In both domains, the core facts reported are statistics compiled within a standard temporal unit (in sports, one ballgame; in finance, one stock market session) together with streaks 13 and records compiled across several such units. This correspondence is, however, imperfect. Consequently, before they can track down usage of a revision rule in the test domain, the CREP expressions approximating the signature of the rule in the acquisition domain must be adjusted for cross-domain discrepancies to prevent false negatives. Two major types of adjustments are necessary: lexical and thematic.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Lexical adjustments handle cases of partial mismatch between the respective vocabularies used to lexicalize matching conceptual structures in each domain. (e.g., the verb \"to rebound from\" expresses the interruption of a streak in the stock market domain, while in the basketball domain \"to break\" or \"to snap\" are preferred since \"to rebound\" is used to express a different concept).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Thematic adjustments handle cases of partial differences between corresponding conceptual structures in the acquisition and test domains. For example, while in sports game-result involves antagonistic teams, its financial domain counterpart session-result concerns only a single indicator. Consequently, the sub-expression for the loser role in the example CREP expression C1 shown before, and which approximates realization pattern R1 for game-result (shown in Fig. 3 ), needs to become optional in order to also approximate patterns for session-result. This is done using the CREP operator ? as shown below: (C1'): TEAM 0= (claimed|recorded)@VBD 1-SCORE 0= (victory|triumph)@NN 0= ((over|against)@IN 0= TEAM)?",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 462,
                        "end": 468,
                        "text": "Fig. 3",
                        "ref_id": "FIGREF2"
                    }
                ],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Note that it is the CREP expressions used to automatically retrieve test corpus sentence pairs attesting usage of a revision rule that require this type of adjustment and not the revision rule itself 14. For example, the Adjoin of Frequency PP to Clause revision rule attaches a streak to a session-result clause without loser role in exactly the same way as it attaches a streak to a game-result with 13 i.e., series of events with similar outcome. 14 Some revision rules do require adjustment, but of another type (cf. Sect. 5).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "loser role. This is illustrated by the two corpus sentences below: P1: \"The Chicago Bulls beat the Phoenix Suns 99-91 for their 3rd straight win\" P1': \"The Amex Market Value Index inched up 0.16 to 481.94 for its sixth straight advance\" Detailed evaluation procedure The overall procedure to test portability of a revision rule consists of considering the surface decrement pairs in the rule signature in order, and repeating the following steps:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "1. Write a CREP expression for the acquisition target pattern.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "2. Iteratively delete, replace or generalize subexpressions in the CREP expression -to gloss over thematic and lexical discrepancies between the acquisition and test domains, and prevent false negatives -until it matches some test corpus sentence(s).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "3. Post-edit the file containing these matched sentences. If it contains only false positives of the sought target pattern, go back to step 2. Otherwise, proceed to step 4.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "4. Repeat step (1-3) with the source pattern of the pair under consideration. If a valid match can also be found for this source pattern, stop: the revision rule is portable. Otherwise, start over from step 1 with the next surface decrement pair in the revision rule signature. If there is no next pair left, stop: the revision rule is considered non-portable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Steps (2,3) constitute a general, generate-and-test procedure to detect realization pattern usage in a corpus 15. Changing one CREP sub-expression may result in going from too specific an expression with no valid match to either: (1) a well-adjusted expression with a valid match, (2) still too specific an expression with no valid match, or (3) already too general an expression with too many matches to be manually post-edited.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "It is in fact always possible to write more context-sensitive expressions, to manually edit larger no-match files, or even to consider larger test corpora in the hope of finding a match. At some point however, one has to estimate, guided by the results of previous runs, that the likelihood of finding a match is too 15 And since most generators rely on knowledge structures equivalent to realization patterns, this procedure can probably be adapted to semi-automatically evaluate the portability of virtually any corpus-based generator. small to justify the cost of further attempts. This is why the last line in the algorithm reads \"considered non-portable\" as opposed to \"non-portable\". The algorithm guarantees the validity of positive (i.e., portable) results only. Therefore, the figures presented in the next section constitute in fact a lower-bound estimate of the actual revision rule portability.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "4",
                "sec_num": null
            },
            {
                "text": "Evaluation results",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "The results of the evaluation are summarized in Fig. 4-10 . They show the revision rule hierarchy, with portable classes highlighted in bold. The frequency of occurrence of each rule in the acquisition corpus is given below the leaves of the hierarchy. Some rules are same-concept portable: they are used to attach corresponding concepts in each domain (e.g., Adjoin of Frequency PP to Clause, as explained in Sect. 4). They could be re-used \"as is\" in the financial domain. Other rules, however, are only different-concept portable: they are used to attach altogether different concepts in each domain. This is the case for example of Adjoin Finite Time Clause to Clause, as illustrated by the two corpus sentences below, where the added temporal adjunct (in bold) conveys a streak in the sports sentence, but a complementary statistics in the financial one: T1: \"to lead Utah to a 119-89 trouncing of Denver as the Jazz defeated the Nuggets for the 12th straight time at home.\" T1': \"Volume amounted to a solid 349 million shares as advances out-paced declines 299 to 218.\".",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 48,
                        "end": 57,
                        "text": "Fig. 4-10",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "For different-concept portable rules, the left-hand side field specifying the concepts incorporable to the draft using this rule will need to be changed when porting the rule to the stock market domain. In Fig. 4 -10, the arcs leading to same-concept portable classes are full and thick, those leading to different-concept portable classes are dotted, and those leading to non-portable classes are full but thin.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 206,
                        "end": 212,
                        "text": "Fig. 4",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "59% of all revision rule classes turned out to be same-concept portable, with another 7% different-concept portable. Remarkably, all eight top-level classes identified in the sports domain had instances same-concept portable to the financial domain, even those involving the most complex non-monotonic revisions, or those with only a few instances in the sports corpus. Among the bottom-level classes that distinguish between revision rule applications in very specific semantic and syntactic contexts, 42% are same-concept portable with another 10% different-concept portable. Finally, the correlation between high usage frequency in the acquisition corpus and portability to the test corpus is not statistically significant (i.e., the hypothesis that the more common a rule, the more likely it is to be portable could not be confirmed on the analyzed sample). See (Robin, 1994b) for further details on the evaluation results.",
                "cite_spans": [
                    {
                        "start": 866,
                        "end": 880,
                        "text": "(Robin, 1994b)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "There are two main stumbling blocks to portability: thematic role mismatch and side revisions. Thematic role mismatches are cases where the semantic label or syntactic sub-category of a constituent added or displaced by the rule differ in each domain (e.g., Adjunctization of Created into Instrument vs. Adjoin of Affected into Instrument). They push portability from 92% down to 71%. Their effect could be reduced by allowing STREAK'S reviser to manipulate the draft down to the surface syntactic role level (e.g., in both corpora Created and Affected surface as object). Currently, the reviser stops at the thematic role level to allow STREAK to take full advantage of the syntactic processing front-end SURGE (Elhadad and Robin, 1996) , which accepts such thematic structures as input.",
                "cite_spans": [
                    {
                        "start": 712,
                        "end": 737,
                        "text": "(Elhadad and Robin, 1996)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "Accompanying side revisions push portability from 71% to 52%. This suggests that the design of STREAK could be improved by keeping side revisions separate from re-structuring revisions and interleaving the applications of the two. Currently, they are integrated together at the bottom of the revision rule hierarchy.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "5",
                "sec_num": null
            },
            {
                "text": "Related work",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "Apart from STREAK, only three generation projects feature an empirical and quantitative evaluation: ANA (Kukich, 1983) , KNIGHT (Lester, 1993) and IMAGENE (Van der Linden, 1993) . ANA generates short, newswire style summaries of the daily fluctuations of several stock market indexes from half-hourly updates of their values. For evaluation, Kukich measures both the conceptual and linguistic (lexical and syntactic) coverages of ANA by comparing the number of concepts and realization patterns identified during a corpus analysis with those actually implemented in the system.",
                "cite_spans": [
                    {
                        "start": 104,
                        "end": 118,
                        "text": "(Kukich, 1983)",
                        "ref_id": "BIBREF2"
                    },
                    {
                        "start": 128,
                        "end": 142,
                        "text": "(Lester, 1993)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 155,
                        "end": 177,
                        "text": "(Van der Linden, 1993)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "KNIGHT generates natural language concept definitions from a large biological knowledge base, relying on SURGE for syntactic realization. For evaluation, Lester performs a Turing test in which a panel of human judges rates 120 sample definitions by assigning grades (from A to F) for:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "\u2022 Semantic accuracy (defined as \"Is the definition adequate, providing correct information and focusing on what's important?\" in the instructions provided to the judges).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "\u2022 Stylistic accuracy (defined as \"Does the definition use good prose and is the information it contains well organized?\" in the instructions provided to the judges). The judges did not know that half the definitions were computer-generated while the other half were written by four human domain experts. Impressively, the results show that:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "\u2022 With respect to semantic accuracy, the human judges could not tell KNIGHT apart from the human writers. \u2022 While as a group, humans got statistically significantly better grades for stylistic accuracy than KNIGHT, the best human writer was single-handedly responsible for this difference.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "IMAGENE generates instructions on how to operate household devices relying on NIGEL (Mann and Matthiessen, 1983) for syntactic realization. The implementation focuses on a very limited aspect of text generation: the realization of purpose relations. Taking as input the description of a pair <operation, purpose of the operation>, augmented by a set of features simulating the communicative context of generation, IMAGENE selects, among the many realizations of purpose generable by NIGEL (e.g., fronted to-infinitive clause vs. trailing for-gerund clauses), the one that is most appropriate for the simulated context (e.g., in the context of several operations sharing the same purpose, the latter is preferentially expressed before those actions rather than after them). IMAGENE's contextual preference rules were abstracted by analyzing an acquisition corpus of about 300 purpose clauses from cordless telephone manuals. For evaluation, Van der Linden compares the purpose realizations picked by IMAGENE to the one in the corresponding corpus text, first on the acquisition corpus and then on a test corpus of about 300 other purpose clauses from manuals for other devices than cordless telephones (ranging from clock radio to automobile). The results show a 71% match on the acquisition corpus 16 and a 52% match on the test corpus.",
                "cite_spans": [
                    {
                        "start": 84,
                        "end": 112,
                        "text": "(Mann and Matthiessen, 1983)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "The table of Fig. 11 summarizes the difference in both goal and methodology between the evaluations carried out in the projects ANA, KNIGHT, IMAGENE and STREAK. In terms of goals, while Kukich and Lester evaluate the coverage or accuracy of a particular implementation, I instead focus on three properties inherent to the use of the revision-based generation model underlying STREAK: robustness (how much of other text samples from the same domain can be generated without acquiring new knowledge?) and scalability (how much more new knowledge is needed to fully cover these other samples?) discussed in (Robin and McKeown, 1995) , and portability to another domain in the present paper. Van der Linden does a little bit of both by first measuring the stylistic accuracy of his system for a very restricted sub-domain, and then measuring how it degrades for a more general domain.",
                "cite_spans": [
                    {
                        "start": 604,
                        "end": 629,
                        "text": "(Robin and McKeown, 1995)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 13,
                        "end": 20,
                        "text": "Fig. 11",
                        "ref_id": "FIGREF0"
                    }
                ],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "In itself, measuring the accuracy and coverage of a particular implementation in the sub-domain for which it was designed brings little insights about what generation approach should be adopted in future work. Indeed, even a system using mere canned text can be very accurate and attain substantial coverage if enough hand-coding effort is put into it. However, all this effort will have to be entirely duplicated each time the system is scaled up or ported to a new domain. Measuring how much of this effort duplication can be avoided when relying on revisionbased generation was the very object of the three evaluations carried in the STREAK project.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "16This imperfect match on the acquisition corpus seems to result from the heuristic nature of IMAGENE's stylistic preferences: individually, none of them needs to apply to the whole corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "In terms of methodology, the main originality of these three evaluations is the use of CREP to partially automate reverse engineering of corpus sentences. Beyond evaluation, CREP is a simple, but general and very handy tool that should prove useful to speed-up a wide range of corpora analyses.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "6",
                "sec_num": null
            },
            {
                "text": "In this paper, I presented a quantitative evaluation of the portability to the stock market domain of the revision rule hierarchy used by the system STREAK to incrementally generate newswire sports summaries. The evaluation procedure consists of searching a test corpus of stock market reports for sentence pairs whose (semantic and syntactic) structures respectively match the triggering condition and application result of each revision rule. The results show that at least 59% of all rule classes are fully portable, with at least another 7% partially portable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            },
            {
                "text": "Since the sports domain is not closer to the financial domain than to other quantitative domains such as meteorology, demography, business auditing or computer surveillance, these results are very encouraging with respect to the general cross-domain reusability potential of the knowledge structures used in revision-based generation. However, the present evaluation concerned only one type of such knowledge structures: revision rules. In future work, similar evaluations will be needed for the other types of knowledge structures: content selection rules, phrase planning rules and lexicalization rules.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "7"
            }
        ],
        "back_matter": [],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "caEP: a regular expressionmatching textual corpus tool",
                "authors": [
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Duford",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Duford, D. 1993. caEP: a regular expression- matching textual corpus tool. Technical Report CU-CS-005-93. Computer Science Department, Columbia University, New York.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "An overview of SURGE: a re-usable comprehensive syntactic realization component",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Elhadad",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "Computer Science and Mathematics Department",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Elhadad, M. and Robin, J. 1996. An overview of SURGE: a re-usable comprehensive syntactic realization component. Technical Report 96-03. Computer Science and Mathematics Department, Ben Gurion University, Beer Sheva, Israel.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Knowledge-based report generation: a knowledge engineering approach to natural language report generation",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Kukich",
                        "suffix": ""
                    }
                ],
                "year": 1983,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kukich, K. 1983. Knowledge-based report genera- tion: a knowledge engineering approach to natural language report generation. PhD. Thesis. Depart- ment of Information Science. University of Pitts- burgh.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Generating natural language explanations from large-scale knowledge bases",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "C"
                        ],
                        "last": "Lester",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Lester, J.C. 1993. Generating natural language explanations from large-scale knowledge bases. PhD. Thesis. Computer Science Department, University of Texas at Austin.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "NIGEL: a systemic grammar for text generation",
                "authors": [
                    {
                        "first": "W",
                        "middle": [
                            "C"
                        ],
                        "last": "Mann",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [
                            "M"
                        ],
                        "last": "Matthiessen",
                        "suffix": ""
                    }
                ],
                "year": 1983,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mann, W.C. and Matthiessen, C. M. 1983. NIGEL: a systemic grammar for text generation. Research Report RR-83-105. ISI. Marina Del Rey, CA.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Corpus analysis for revision-based generation of complex sentences",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [
                            "R"
                        ],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the 11th National Conference on Artificial Intelligence, Washington DC. (AAAI'93)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robin, J. and McKeown, K.R. 1993. Corpus anal- ysis for revision-based generation of complex sen- tences. In Proceedings of the 11th National Con- ference on Artificial Intelligence, Washington DC. (AAAI'93).",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Empirically designing and evaluating a new revision-based model for summary generation",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [
                            "R"
                        ],
                        "last": "Mckeown",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Special Issue on Empirical Methods",
                "volume": "85",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robin, J. and McKeown, K.R. 1995. Empirically designing and evaluating a new revision-based model for summary generation. Artificial Intel- ligence. Vol 85. Special Issue on Empirical Meth- ods. North-Holland.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "A revision-based generation architecture for reporting facts in their historical context",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "New Concepts in Natural Language Generation: Planning, Realization and System. Horacek, H. and Zock",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robin, J. 1993. A revision-based generation archi- tecture for reporting facts in their historical con- text. In New Concepts in Natural Language Gen- eration: Planning, Realization and System. Ho- racek, H. and Zock, M., Eds. Frances Pinter.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Automatic generation and revision of natural language summaries providing historical background",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "Proceedings of the 11th Brazilian Symposium on Artificial Intelligence, Fortaleza",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robin, J. 1994a. Automatic generation and revision of natural language summaries providing histori- cal background In Proceedings of the 11th Brazil- ian Symposium on Artificial Intelligence, Fort- aleza, Brazil. (SBIA'94).",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Revision-based generation of natural language summaries providing historical background: corpus-based analysis, design, implementation and evaluation",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Robin",
                        "suffix": ""
                    }
                ],
                "year": 1994,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Robin, J. 1994b. Revision-based generation of natu- ral language summaries providing historical back- ground: corpus-based analysis, design, implemen- tation and evaluation. PhD. Thesis. Available as Technical Report CU-CS-034-94. Computer Science Department, Columbia University, New York.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "Expressing rhetorical relations in instructional texts: a case study of the purpose relation",
                "authors": [
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Van Der Linden",
                        "suffix": ""
                    },
                    {
                        "first": "J",
                        "middle": [
                            "H"
                        ],
                        "last": "Martin",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Computational Linguistics",
                "volume": "21",
                "issue": "1",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Van der Linden, K. and Martin, J.H. 1995. Ex- pressing rhetorical relations in instructional texts: a case study of the purpose relation. Computa- tional Linguistics, 21(1). MIT Press.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Complex sentence generation through incremental revisions in STREAK"
            },
            "FIGREF1": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Paragraph of simple sentences paraphrasing a single complex sentence sentence reaches linguistic complexity limits empir-icMly observed in the corpus (e.g., 50 word long or parse tree of depth 10)."
            },
            "FIGREF2": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "Realization pattern examples"
            },
            "FIGREF3": {
                "type_str": "figure",
                "uris": null,
                "num": null,
                "text": "rument as-affected-apposition as-mean as-co-event Demotion and Promotion revision rule sub-hierarchy Adjoin Empirical evaluations in language generation conveys well organized\" in the instructions provided to the judges)."
            },
            "TABREF0": {
                "text": "1. Initial draft (basic sentence pattern):\"Dallas, TX -Charles Barkley scored 42 points Sunday as the Phoenix Suns defeated the Dallas Mavericks 123-97.\"",
                "type_str": "table",
                "content": "<table><tr><td>2. Adjunctization of Created into Instrument:</td></tr><tr><td>\"Dallas, TX -Charles Barkley tied a season high wlth 42 points Sunday as the Phoenix Suns defeated the</td></tr><tr><td>Dallas Mavericks 123-97.\"</td></tr><tr><td>3. Coordinative Conjoin of Clause:</td></tr><tr><td>\"Dallas, TX -Charles Barkley tied a season high with 42 points and Danny A|nge added 21 Sunday as the</td></tr><tr><td>Phoenix Suns defeated the Dallas Mavericks 123-97.\"</td></tr><tr><td>-97.\"</td></tr><tr><td>5. l~ominalization with 0rdinal Adjoin:</td></tr><tr><td>\"Dallas, TX -Charles Barldey tied a season high with 42 points and Danny Ainge came off the bench to add</td></tr><tr><td>21 Sunday as the Phoenix Suns handed the Dallas Mavericks their 13th straight home defeat 123-97.\"</td></tr></table>",
                "html": null,
                "num": null
            },
            "TABREF1": {
                "text": "Is a target pattern of the revision rule Adjunctization of Range into Instrument.",
                "type_str": "table",
                "content": "<table><tr><td>winner</td><td>aspect</td><td colspan=\"2\">l type</td><td colspan=\"2\">l streak</td><td>length</td><td>[</td><td/><td>[ score ]game-result [</td><td>loser</td></tr><tr><td>agent</td><td>action</td><td colspan=\"3\">affected/located</td><td/><td>location</td><td/><td/><td>instrument</td></tr><tr><td>proper</td><td>verb</td><td/><td>NP</td><td/><td/><td>PP</td><td/><td/><td>PP</td></tr><tr><td/><td/><td colspan=\"3\">det] classifier I noun</td><td/><td/><td>prep ]</td><td/><td>NP</td></tr><tr><td>Utah</td><td>extended</td><td>its</td><td>win</td><td>streak</td><td/><td>to 6 games</td><td>with</td><td>a</td><td>L_J numbe~J 99-84 triumph -over enl3-d-~V-~ PP</td></tr><tr><td colspan=\"2\">Boston stretching</td><td colspan=\"2\">its winning</td><td>spree</td><td/><td colspan=\"2\">to 9 outings with</td><td>a</td><td>118-94</td><td>rout</td><td>of Utah</td></tr><tr><td colspan=\"3\">Realization pattern R~:</td><td/><td/><td/><td/><td/><td/></tr><tr><td/><td/><td>winner</td><td/><td/><td/><td colspan=\"3\">I score ] game-result I</td><td>loser</td></tr><tr><td/><td/><td>agent</td><td colspan=\"2\">action</td><td/><td/><td>range</td><td/></tr><tr><td/><td/><td>proper</td><td colspan=\"2\">support-verb</td><td/><td/><td>NP</td><td/></tr><tr><td/><td/><td>Chicago</td><td colspan=\"2\">claimed</td><td colspan=\"2\">det I number I a</td><td>noun Y</td><td>I</td><td>PP over New Jersey</td></tr><tr><td/><td/><td>Orlando</td><td colspan=\"2\">recorded</td><td>a</td><td>101-95</td><td>triumph</td><td/><td>against New York</td></tr></table>",
                "html": null,
                "num": null
            }
        }
    }
}