File size: 67,944 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
{
    "paper_id": "P02-1004",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:31:13.516518Z"
    },
    "title": "Machine-learned contexts for linguistic operations in German sentence realization",
    "authors": [
        {
            "first": "Michael",
            "middle": [],
            "last": "Gamon",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Microsoft Research Microsoft Corporation Redmond",
                "location": {
                    "postCode": "98052",
                    "region": "WA"
                }
            },
            "email": "mgamon@microsoft.com"
        },
        {
            "first": "Eric",
            "middle": [],
            "last": "Ringger",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Microsoft Research Microsoft Corporation Redmond",
                "location": {
                    "postCode": "98052",
                    "region": "WA"
                }
            },
            "email": "ringger@microsoft.com"
        },
        {
            "first": "Simon",
            "middle": [],
            "last": "Corston-Oliver",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Microsoft Research Microsoft Corporation Redmond",
                "location": {
                    "postCode": "98052",
                    "region": "WA"
                }
            },
            "email": "simonco@microsoft.com"
        },
        {
            "first": "Robert",
            "middle": [],
            "last": "Moore",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "Microsoft Research Microsoft Corporation Redmond",
                "location": {
                    "postCode": "98052",
                    "region": "WA"
                }
            },
            "email": "bobmoore@microsoft.com"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We show that it is possible to learn the contexts for linguistic operations which map a semantic representation to a surface syntactic tree in sentence realization with high accuracy. We cast the problem of learning the contexts for the linguistic operations as classification tasks, and apply straightforward machine learning techniques, such as decision tree learning. The training data consist of linguistic features extracted from syntactic and semantic representations produced by a linguistic analysis system. The target features are extracted from links to surface syntax trees. Our evidence consists of four examples from the German sentence realization system code-named Amalgam: case assignment, assignment of verb position features, extraposition, and syntactic aggregation",
    "pdf_parse": {
        "paper_id": "P02-1004",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We show that it is possible to learn the contexts for linguistic operations which map a semantic representation to a surface syntactic tree in sentence realization with high accuracy. We cast the problem of learning the contexts for the linguistic operations as classification tasks, and apply straightforward machine learning techniques, such as decision tree learning. The training data consist of linguistic features extracted from syntactic and semantic representations produced by a linguistic analysis system. The target features are extracted from links to surface syntax trees. Our evidence consists of four examples from the German sentence realization system code-named Amalgam: case assignment, assignment of verb position features, extraposition, and syntactic aggregation",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The last stage of natural language generation, sentence realization, creates the surface string from an abstract (typically semantic) representation. This mapping from abstract representation to surface string can be direct, or it can employ intermediate syntactic representations which significantly constrain the output. Furthermore, the mapping can be performed purely by rules, by application of statistical models, or by a combination of both techniques.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Among the systems that use statistical or machine learned techniques in sentence realization, there are various degrees of intermediate syntactic structure. Nitrogen Knight, 1998a, 1998b ) produces a large set of alternative surface realizations of an input structure (which can vary in abstractness). This set of candidate surface strings, represented as a word lattice, is then rescored by a wordbigram language model, to produce the bestranked output sentence. FERGUS (Bangalore and Rambow, 2000) , on the other hand, employs a model of syntactic structure during sentence realization. In simple terms, it adds a tree-based stochastic model to the approach taken by the Nitrogen system. This tree-based model chooses a best-ranked XTAG representation for a given dependency structure. Possible linearizations of the XTAG representation are generated and then evaluated by a language model to pick the best possible linearization, as in Nitrogen.",
                "cite_spans": [
                    {
                        "start": 166,
                        "end": 186,
                        "text": "Knight, 1998a, 1998b",
                        "ref_id": null
                    },
                    {
                        "start": 471,
                        "end": 499,
                        "text": "(Bangalore and Rambow, 2000)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "In contrast, the sentence realization system code-named Amalgam (A Machine Learned Generation Module) Gamon et al., 2002b ) employs a series of linguistic operations which map a semantic representation to a surface syntactic tree via intermediate syntactic representations. The contexts for most of these operations in Amalgam are machine learned. The resulting syntactic tree contains all the necessary information on its leaf nodes from which a surface string can be read.",
                "cite_spans": [
                    {
                        "start": 102,
                        "end": 121,
                        "text": "Gamon et al., 2002b",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The goal of this paper is to show that it is possible to learn accurately the contexts for linguistically complex operations in sentence realization. We propose that learning the contexts for the application of these linguistic operations can be viewed as per-operation classification problems. This approach combines advantages of a linguistically informed approach to sentence realization with the advantages of a machine learning approach. The linguistically informed approach allows us to deal with complex linguistic phenomena, while machine learning automates the discovery of contexts that are linguistically relevant and relevant for the domain of the data. The machine learning approach also facilitates adaptation of the system to a new domain or language. Furthermore, the quantitative nature of the machine learned models permits finer distinctions and ranking among possible solutions.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "To substantiate our claim, we provide four examples from Amalgam: assignment of case, assignment of verb position features, extraposition, and syntactic aggregation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Amalgam takes as its input a sentence-level semantic graph representation with fixed lexical choices for content words (the logical form graph of the NLPWin system -see (Heidorn, 2000) ). This representation is first degraphed into a tree, and then gradually augmented by the insertion of function words, assignment of case and verb position features, syntactic labels, etc., and transformed into a syntactic surface tree. A generative statistical language model establishes linear order in the surface tree (Ringger et al., in preparation) , and a surface string is generated from the leaf nodes. Amalgam consists of eight stages. We label these ML (machine-learned context) or RB (rule-based). All machine learned components, with the exception of the generative language model for ordering of constituents (stage 5), are decision tree classifiers built with the WinMine toolkit (Chickering et al., 1997; Chickering, nd.) . There are a total of eighteen decision tree classifiers in the system. The complexity of the decision trees varies with the complexity of the modeled task. The number of branching nodes in the decision tree models in Amalgam ranges from 3 to 447.",
                "cite_spans": [
                    {
                        "start": 169,
                        "end": 184,
                        "text": "(Heidorn, 2000)",
                        "ref_id": null
                    },
                    {
                        "start": 508,
                        "end": 540,
                        "text": "(Ringger et al., in preparation)",
                        "ref_id": null
                    },
                    {
                        "start": 881,
                        "end": 906,
                        "text": "(Chickering et al., 1997;",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 907,
                        "end": 923,
                        "text": "Chickering, nd.)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Overview of Amalgam",
                "sec_num": null
            },
            {
                "text": "The data for all of the models were drawn from a set of 100,000 sentences from technical software manuals and help files. The sentences are analyzed by the NLPWin system, which provides a syntactic and logical form analysis. Nodes in the logical form representation are linked to the corresponding syntactic nodes, allowing us to learn contexts for the mapping from the semantic representation to a surface syntax tree. The data is split 70/30 for training versus model parameter tuning. For each set of data we built decision trees at several different levels of granularity (by manipulating the prior probability of tree structures to favor simpler structures) and selected the model with the maximal accuracy as determined on the parameter tuning set. All models are then tested on data extracted from a separate blind set of 10,000 sentences from the same domain. For both training and test, we only extract features from sentences that have received a complete, spanning parse: 85.14% of the sentences in the training and parameter tuning set, and 84.59% in the blind test set fall into that category. Most sentences yield more than one training case.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and feature extraction",
                "sec_num": "3"
            },
            {
                "text": "We attempt to standardize as much as possible the set of features to be extracted. We exploit the full set of features and attributes available in the analysis, instead of pre-determining a small set of potentially relevant features (Gamon et al., 2002b) . This allows us to share the majority of code between the individual feature extraction tasks. More importantly, it enables us to discover new linguistically interesting and/or domainspecific generalizations from the data. Typically, we extract the full set of available analysis features of the node under investigation, its parent and its grandparent, with the only restriction being that these features need to be available at the stage where the model is consulted at generation runtime. This provides us with a sufficiently large structural context for the operations. In addition, for some of the models we add a small set of features that we believe to be important for the task at hand, and that cannot easily be expressed as a combination of analysis features/attributes on constituents. Most features, such as lexical subcategorization features and semantic features such as [Definite] are binary. Other features, such as syntactic label or semantic relation, have as many as 25 values. Training time on a standard 500MHz PC ranges from one hour to six hours.",
                "cite_spans": [
                    {
                        "start": 233,
                        "end": 254,
                        "text": "(Gamon et al., 2002b)",
                        "ref_id": "BIBREF9"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Data and feature extraction",
                "sec_num": "3"
            },
            {
                "text": "In German sentence realization, proper assignment of morphological case is essential for fluent and comprehensible output. German is a language with fairly free constituent order, and the identification of functional roles, such as subject versus object, is not determined by position in the sentence, as in English, but by morphological marking of one of the four cases: nominative, accusative, genitive or dative. In Amalgam, case assignment is one of the last steps in the Flesh-out stage (stage 2). Morphological realization of case can be ambiguous in German (for example, a feminine singular NP is ambiguous between accusative and nominative case). Since the morphological realization of case depends on the gender, number and morphological paradigm of a given NP, we chose to only consider NP nodes with unambiguous case as training data for the model 1 . As the target feature for this model is 1 Ideally, we should train the case assignment model on a corpus that is hand-disambiguated for case. In the absence of such a corpus, though, we believe that our approach is linguistically justified. The case of an NP depends solely on the syntactic context it appears in. morphological case, it has four possible values for the four cases in German.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Assignment of case",
                "sec_num": null
            },
            {
                "text": "For each data point, a total of 712 features was extracted. Of the 712 features available to the decision tree building tools, 72 were selected as having predictive value in the model. The selected features fall into the following categories:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the case assignment model",
                "sec_num": "4.1"
            },
            {
                "text": "\u2022 syntactic label of the node, its parent and grandparent \u2022 lemma (i.e., citation form) of the parent, and lemma of the governing preposition \u2022 subcategorization information, including case governing properties of governing preposition and parent \u2022 semantic relation of the node itself to its parent, of the parent to its grandparent, and of the grandparent to its greatgrandparent \u2022 number information on the parent and grandparent \u2022 tense and mood on the parent and grandparent \u2022 definiteness on the node, its parent and grandparent \u2022 the presence of various semantic dependents such as subject, direct and indirect objects, operators, attributive adjuncts and unspecified modifiers on the node and its parent and grandparent \u2022 quantification, negation, coordination on the node, the parent and grandparent \u2022 part of speech of the node, the parent and the grandparent \u2022 miscellaneous semantic features on the node itself and the parent",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the case assignment model",
                "sec_num": "4.1"
            },
            {
                "text": "The decision tree model for case assignment has 226 branching nodes, making it one of the most complex models in Amalgam. For each nominal node in the 10,000 sentence test set, we compared the prediction of the model to the Since we want to learn the syntactically determining factors for case, using unambiguously case marked NPs for training seems justified. morphological case compatible with that node. The previously mentioned example of a singular feminine NP, for example, would yield a \"correct\" if the model had predicted nominative or accusative case (because the NP is morphologically ambiguous between accusative and nominative), and it would yield an \"incorrect\" if the model had predicted genitive or dative. This particular evaluation setup was a necessary compromise because of the absence of a handannotated corpus with disambiguated case in our domain. The caveat here is that downstream models in the Amalgam pipeline that pick up on case as one of their features rely on the absolute accuracy of the assigned case, not the relative accuracy with respect to morphological ambiguity. Accuracy numbers for each of the four case assignments are given in Table 1 . Note that it is impossible to give precision/recall numbers, without a hand-disambiguated test set. The baseline for this task is 0.7049 (accuracy if the most frequent case (nominative) had been assigned to all NPs). ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 1170,
                        "end": 1177,
                        "text": "Table 1",
                        "ref_id": "TABREF0"
                    }
                ],
                "eq_spans": [],
                "section": "The case assignment model",
                "sec_num": "4.2"
            },
            {
                "text": "One of the most striking properties of German is the distributional pattern of verbs in main and subordinate clauses. Most descriptive accounts of German syntax are based on a topology of the German sentence that treats the position of the verb as the fixed frame around which other syntactic constituents are organized in relatively free order (cf. Eisenberg, 1999; Engel, 1996) . The position of the verb in German is non-negotiable; errors in the positioning of the verb result in gibberish, whereas most permutations of other constituents only result in less fluent output. Depending on the position of the finite verb, German sentences and verb phrases are classified as being \"verb-initial\", \"verb-second\" or \"verb-final\". In verb-initial clauses (e.g., in imperatives), the finite verb is in initial position. Verb-second sentences contain one constituent preceding the finite verb, in the so-called \"pre-field\". The finite verb is followed by any number of constituents in the \"middle-field\", and any non-finite verbs are positioned at the right periphery of the clause, possibly followed by extraposed material or complement clauses (the \"post-field\"). Verb-final clauses contain no verbal element in the verbsecond position: all verbs are clustered at the right periphery, preceded by any number of constituents and followed only by complement clauses and extraposed material.",
                "cite_spans": [
                    {
                        "start": 350,
                        "end": 366,
                        "text": "Eisenberg, 1999;",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 367,
                        "end": 379,
                        "text": "Engel, 1996)",
                        "ref_id": "BIBREF7"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Assignment of verb position features",
                "sec_num": null
            },
            {
                "text": "During the Flesh-out stage in Amalgam, a decision tree classifier is consulted to make a classification decision among the four verb positions: \"verb-initial\", \"verb-second\", \"verbfinal\", and \"undefined\". The value \"undefined\" for the target feature of verb position is extracted for those verbal constituents where the local syntactic context is too limited to make a clear distinction between initial, second, or final position of the verb. The number of \"undefined\" verb positions is small compared to the number of clearly established verb positions: in the test set, there were only 690 observed cases of \"undefined\" verb position out of a total of 15,492 data points. At runtime in Amalgam, verb position features are assigned based on the classification provided by the decision tree model.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Assignment of verb position features",
                "sec_num": null
            },
            {
                "text": "For each data point, 713 features were extracted. Of those features, 41 were selected by the decision tree algorithm. The selected features fall into the following categories:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the verb position model",
                "sec_num": "5.1"
            },
            {
                "text": "\u2022 syntactic label of the node and the parent \u2022 subcategorization features \u2022 semantic relations of the node to its parent and of the parent node to its parent \u2022 tense and mood features \u2022 presence of empty, uncontrolled subject \u2022 semantic features on the node and the parent",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the verb position model",
                "sec_num": "5.1"
            },
            {
                "text": "The decision tree model for verb position has 115 branching nodes. Precision, recall and F-measure for the model are given in Table 2 . As a point of reference for the verb position classifier, assigning the most frequent value (second) of the target feature yields a baseline score of 0.4240. The interesting difference between English and German is the frequency of this phenomenon. While it can easily be argued that English sentence realization may ignore extraposition and still result in very fluent output, the fluency of sentence realization for German will suffer much more from the lack of a good extraposition mechanism. We profiled data from various domains (Gamon et al. 2002a) to substantiate this linguistic claim (see Uszkoreit et al. 1998 for similar results). In the technical domain, more than one third of German relative clauses are extraposed, as compared to a meagre 0.22% of English relative clauses. In encyclopaedia text (Microsoft Encarta), approximately every fifth German relative clause is extraposed, compared to only 0.3% of English relative clauses. For complement clauses and infinitival clauses, the differences are not as striking, but still significant: in the technical and encyclopaedia domains, extraposition of infinitival and complement clauses in German ranges from 1.5% to 3.2%, whereas English only shows a range from 0% to 0.53%. We chose to model extraposition as an iterative movement process from the original attachment site to the next higher node in the tree (for an alternative one-step solution and a comparison of the two approaches see (Gamon et al., 2002a) ). The target feature of the model is the answer to the yes/no question \"Should the clause move from node X to the parent of node X?\".",
                "cite_spans": [
                    {
                        "start": 670,
                        "end": 690,
                        "text": "(Gamon et al. 2002a)",
                        "ref_id": "BIBREF8"
                    },
                    {
                        "start": 734,
                        "end": 755,
                        "text": "Uszkoreit et al. 1998",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 1592,
                        "end": 1613,
                        "text": "(Gamon et al., 2002a)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 126,
                        "end": 133,
                        "text": "Table 2",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "The verb position model",
                "sec_num": "5.2"
            },
            {
                "text": "The tendency of a clause to be extraposed depends on properties of both the clause itself (e.g., some notion of \"heaviness\") and the current attachment site. Very coarse linguistic generalizations are that a relative clause tends to be extraposed if it is sufficiently \"heavy\" and if it is followed by verbal material in the same clause. Feature extraction for this model reflects that fact by taking into consideration features on the extraposition candidate, the current attachment site, and potential next higher landing site. This results in a total of 1168 features. Each node in the parent chain of an extraposable clause, up to the actual attachment node, constitutes a single data point. During the decision tree building process, 60 features were selected as predictive. They can be classified as follows: General feature:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the extraposition model",
                "sec_num": "6.1"
            },
            {
                "text": "\u2022 overall sentence length",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the extraposition model",
                "sec_num": "6.1"
            },
            {
                "text": "\u2022 presence of verb-final and verb-second ancestor nodes \u2022 \"heaviness\" both in number of characters and number of tokens ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features on the extraposable clause:",
                "sec_num": null
            },
            {
                "text": "During testing of the extraposition model, the model was consulted for each extraposable clause to find the highest node to which that clause could be extraposed. In other words, the target node for extraposition is the highest node in the parent chain for which the answer to the classification task \"Should the clause move from node X to the parent of node X?\" is \"yes\" with no interceding \"no\" answer. The prediction of the model was compared with the actual observed attachment site of the extraposable clause to yield the accuracy figures shown in Table 3 . The model has 116 branching nodes. The baseline for this task is calculated by applying the most frequent value for the target feature (\"don't move\") to all nodes. The baseline for extraposition of infinitival and complement clauses is very high. The number of extraposed clauses of both types in the test set (fifteen extraposed infinitival clauses and twelve extraposed complement clauses) is very small, so it comes as no surprise that the model accuracy ranges around the baseline for these two types of extraposed clauses. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 553,
                        "end": 560,
                        "text": "Table 3",
                        "ref_id": "TABREF2"
                    }
                ],
                "eq_spans": [],
                "section": "The extraposition model",
                "sec_num": "6.2"
            },
            {
                "text": "Any sentence realization component that generates from an abstract semantic representation and strives to produce fluent output beyond simple templates will have to deal with coordination and the problem of duplicated material in coordination. This is generally viewed as a subarea of aggregation in the generation literature (Wilkinson, 1995; Shaw, 1998; Reape and Mellish, 1999; Dalianis and Hovy, 1993) . In Amalgam, the approach we take is strictly intrasentential, along the lines of what has been called conjunction reduction in the linguistic literature (McCawley, 1988) . While this may seem a fairly straightforward task compared to inter-sentential, semantic and lexical aggregation, it should be noted that the cross-linguistic complexity of the phenomenon makes it much less trivial than a first glance at English would suggest. In German, for example, position of the verb in the coordinated VPs plays an important role in determining which duplicated constituent can be omitted. The target feature for the classification task is formulated as follows: \"In which coordinated constituent is the duplicated constituent to be realized?\". There are three values for the target feature: \"first\", \"last\", and \"middle\". The third value (\"middle\") is a default value for cases where neither the first, nor the last coordinated constituent can be identified as the location for the realization of duplicated constituents. At generation runtime, multiple realizations of a constituent in coordination are collected and the aggregation model is consulted to decide on the optimal position in which to realize that constituent. The constituent in that position is retained, while all other duplicates are removed from the tree.",
                "cite_spans": [
                    {
                        "start": 326,
                        "end": 343,
                        "text": "(Wilkinson, 1995;",
                        "ref_id": "BIBREF18"
                    },
                    {
                        "start": 344,
                        "end": 355,
                        "text": "Shaw, 1998;",
                        "ref_id": "BIBREF16"
                    },
                    {
                        "start": 356,
                        "end": 380,
                        "text": "Reape and Mellish, 1999;",
                        "ref_id": "BIBREF14"
                    },
                    {
                        "start": 381,
                        "end": 405,
                        "text": "Dalianis and Hovy, 1993)",
                        "ref_id": "BIBREF5"
                    },
                    {
                        "start": 561,
                        "end": 577,
                        "text": "(McCawley, 1988)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Syntactic aggregation",
                "sec_num": "7"
            },
            {
                "text": "A total of 714 features were extracted for the syntactic aggregation model. Each instance of coordination which exhibits duplicated material at the semantic level without corresponding duplication at the syntactic level constitutes a data point. Of these features, 15 were selected as predictive in the process of building the decision tree model:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the syntactic aggregation model",
                "sec_num": "7.1"
            },
            {
                "text": "\u2022 syntactic label and syntactic label of the parent node ",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Features in the syntactic aggregation model",
                "sec_num": "7.1"
            },
            {
                "text": "The syntactic aggregation model has 21 branching nodes. Precision, recall and F-measure for the model are given in Table 4 . As was to be expected on the basis of linguistic intuition, the value \"middle\" for the target feature did not play any role. In the test set there were only 2 observed instances of that value. The baseline for this task is 0.8566 (assuming \"first\" as the default value). ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 115,
                        "end": 122,
                        "text": "Table 4",
                        "ref_id": "TABREF3"
                    }
                ],
                "eq_spans": [],
                "section": "The syntactic aggregation model",
                "sec_num": "7.2"
            },
            {
                "text": "We have demonstrated on the basis of four examples that it is possible to learn the contexts for complex linguistic operations in sentence realization with high accuracy. We proposed to standardize most of the feature extraction for the machine learning tasks to all available linguistic features on the node, and its parent and grandparent node. This generalized set of features allows us to rapidly train on new sets of data and to experiment with new machine learning tasks. Furthermore, it prevents us from focusing on a small set of hand-selected features for a given phenomenon; hence, it allows us to learn new (and unexpected) generalizations from new data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and future research",
                "sec_num": null
            },
            {
                "text": "We have found decision trees to be useful for our classification problems, but other classifiers are certainly applicable. Decision trees provided an easily accessible inventory of the selected features and some indication of their relative importance in predicting the target features in question. Although our exposition has focused on the preferred value (the mode) predicted by the models, decision trees built by WinMine predict a probability distribution over all possible target values. For a system such as Amalgam, built as a pipeline of stages, this point is critical, since finding the best final hypothesis requires the consideration of multiple hypotheses and the concomitant combination of probabilities assigned by the various models in the pipeline to all possible target values. For example, our extraposition model presented above depends upon the value of the verb-position feature, which is predicted upstream in the pipeline. Currently, we greedily pursue the best hypothesis, which includes only the mode of the verb-position model's prediction. However, work in progress involves a search that constructs multiple hypotheses incorporating each of the predictions of the verb-position model and their scores, and likewise for all other models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and future research",
                "sec_num": null
            },
            {
                "text": "We have found the combination of knowledge-engineered linguistic operations with machine-learned contexts to be advantageous. The knowledge-engineered choice of linguistic operations allows us to deal with complex linguistic phenomena. Machine learning, on the other hand, automates the discovery of general and domain-specific contexts. This facilitates adaptation of the system to a new domain or even to a new language.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and future research",
                "sec_num": null
            },
            {
                "text": "It should also be noted that none of the learned models can be easily replaced by a rule. While case assignment, for example, depends to a high degree on the lexical properties of the governing preposition or governing verb, other factors such as semantic relations, etc., play a significant role, so that any rule approaching the accuracy of the model would have to be quite complex.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and future research",
                "sec_num": null
            },
            {
                "text": "We are currently adapting Amalgam to the task of French sentence realization, as a test of the linguistic generality of the system. Initial results are encouraging. It appears that much of the feature extraction and many of the linguistic operations are reusable.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion and future research",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "Our thanks go to Max Chickering for assistance with the WinMine decision tree tools and to Zhu Zhang who made significant contributions to the development of the extraposition models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Exploiting a probabilistic hierarchical model for generation",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Bangalore",
                        "suffix": ""
                    },
                    {
                        "first": "O",
                        "middle": [],
                        "last": "Rambow",
                        "suffix": ""
                    }
                ],
                "year": 2000,
                "venue": "Proceedings of the 18th International Conference on Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Bangalore and O. Rambow 2000. Exploiting a probabilistic hierarchical model for generation. Proceedings of the 18th International Conference on Computational Linguistics (COLING 2000).",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "WinMine Toolkit Home Page",
                "authors": [
                    {
                        "first": "D",
                        "middle": [
                            "M"
                        ],
                        "last": "Chickering",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Nd",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. M. Chickering. nd. WinMine Toolkit Home Page. http://research.microsoft.com/~dmax/WinMine/Tool doc.htm",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "A Bayesian approach to learning Bayesian networks with local structure",
                "authors": [
                    {
                        "first": "D",
                        "middle": [
                            "M"
                        ],
                        "last": "Chickering",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Heckerman",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Meek",
                        "suffix": ""
                    }
                ],
                "year": 1997,
                "venue": "Proceedings of the Thirteenth Conference",
                "volume": "",
                "issue": "",
                "pages": "80--89",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. M. Chickering, D. Heckerman and C. Meek. 1997. A Bayesian approach to learning Bayesian networks with local structure. In \"Uncertainty in Artificial Intelligence: Proceedings of the Thirteenth Conference\", D. Geiger and P. Punadlik Shenoy, ed., Morgan Kaufman, San Francisco, California, pp. 80-89.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "An overview of Amalgam: A machinelearned generation module",
                "authors": [
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Corston-Oliver",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Ringger",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Moore",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "S. Corston-Oliver, M. Gamon, E. Ringger, and R. Moore. 2002. An overview of Amalgam: A machine- learned generation module. To be presented at INLG 2002.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Aggregation in natural language generation",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Dalianis",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Hovy",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "Proceedings of the 4th European Workshop on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. Dalianis and E. Hovy 1993 Aggregation in natural language generation. Proceedings of the 4th European Workshop on Natural Language Generation, Pisa, Italy.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "Grundriss der deutschen Grammatik. Band2: Der Satz",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Eisenberg",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "P. Eisenberg 1999. Grundriss der deutschen Grammatik. Band2: Der Satz. Metzler, Stuttgart/Weimar.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Deutsche Grammatik",
                "authors": [
                    {
                        "first": "U",
                        "middle": [],
                        "last": "Engel",
                        "suffix": ""
                    }
                ],
                "year": 1996,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "U. Engel. 1996. Deutsche Grammatik. Groos, Heidelberg.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Extraposition: A case study in German sentence realization",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Ringger",
                        "suffix": ""
                    },
                    {
                        "first": "Z",
                        "middle": [],
                        "last": "Zhang",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Moore",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Corston-Oliver",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "the 19th International Conference on Computational Linguistics (COLING)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Gamon, E. Ringger, Z. Zhang, R. Moore and S. Corston-Oliver. 2002a. Extraposition: A case study in German sentence realization. To be presented at the 19th International Conference on Computational Linguistics (COLING) 2002.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Amalgam: A machine-learned generation module",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    },
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Ringger",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Corston-Oliver",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Gamon, E. Ringger, S. Corston-Oliver. 2002b. Amalgam: A machine-learned generation module. Microsoft Research Technical Report, to appear.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "A Handbook of Natural Language Processing: Techniques and Applications for the Processing of Language as Text",
                "authors": [
                    {
                        "first": "G",
                        "middle": [
                            "E"
                        ],
                        "last": "Heidorn",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "G. E. Heidorn. 2002. Intelligent Writing Assistance. In \"A Handbook of Natural Language Processing: Techniques and Applications for the Processing of Language as Text\", R. Dale, H. Moisl, and H. Somers (ed.), Marce Dekker, New York.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "The practical value of n-grams in generation",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Langkilde",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 9th International Workshop on Natural Language Generation",
                "volume": "",
                "issue": "",
                "pages": "248--255",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "I. Langkilde. and K. Knight. 1998a. The practical value of n-grams in generation. Proceedings of the 9th International Workshop on Natural Language Generation, Niagara-on-the-Lake, Canada. pp. 248- 255.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Generation that exploits corpus-based statistical knowledge",
                "authors": [
                    {
                        "first": "I",
                        "middle": [],
                        "last": "Langkilde",
                        "suffix": ""
                    },
                    {
                        "first": "K",
                        "middle": [],
                        "last": "Knight",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of the 36th ACL and 17th COLING (COLING-ACL",
                "volume": "",
                "issue": "",
                "pages": "704--710",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "I. Langkilde and K. Knight. 1998b. Generation that exploits corpus-based statistical knowledge. Proceedings of the 36th ACL and 17th COLING (COLING-ACL 1998). Montr\u00e9al, Qu\u00e9bec, Canada. 704-710.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "The Syntactic Phenomena of English",
                "authors": [
                    {
                        "first": "J",
                        "middle": [
                            "D"
                        ],
                        "last": "Mccawley",
                        "suffix": ""
                    }
                ],
                "year": 1988,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. D. McCawley. 1988 The Syntactic Phenomena of English. The University of Chicago Press, Chicago and London.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Just what is aggregation anyway? Proceedings of the 7th European Workshop on Natural Language Generation",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Reape",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Mellish",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Reape. and C. Mellish. 1999. Just what is aggregation anyway? Proceedings of the 7th European Workshop on Natural Language Generation, Toulouse, France.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "preparation. A Linguistically Informed Generative Language Model for Intra-Constituent Ordering during Sentence Realization",
                "authors": [
                    {
                        "first": "E",
                        "middle": [],
                        "last": "Ringger",
                        "suffix": ""
                    },
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Moore",
                        "suffix": ""
                    },
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Gamon",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Corston-Oliver",
                        "suffix": ""
                    }
                ],
                "year": null,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "E. Ringger, R. Moore, M. Gamon, and S. Corston- Oliver. In preparation. A Linguistically Informed Generative Language Model for Intra-Constituent Ordering during Sentence Realization.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "Segregatory Coordination and Ellipsis in Text Generation",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Shaw",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Proceedings of COLING-ACL",
                "volume": "",
                "issue": "",
                "pages": "1220--1226",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Shaw. 1998 Segregatory Coordination and Ellipsis in Text Generation. Proceedings of COLING-ACL, 1998, pp 1220-1226.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "Aspekte der Relativsatzextraposition im Deutschen",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Uszkoreit",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Brants",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [],
                        "last": "Duchier",
                        "suffix": ""
                    },
                    {
                        "first": "B",
                        "middle": [],
                        "last": "Krenn",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Konieczny",
                        "suffix": ""
                    },
                    {
                        "first": "S",
                        "middle": [],
                        "last": "Oepen",
                        "suffix": ""
                    },
                    {
                        "first": "W",
                        "middle": [],
                        "last": "Skut",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "Sonderforschungsbereich",
                "volume": "378",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "H. Uszkoreit, T. Brants, D. Duchier, B. Krenn, L. Konieczny, S. Oepen and W. Skut. 1998. Aspekte der Relativsatzextraposition im Deutschen. Claus- Report Nr.99, Sonderforschungsbereich 378, Universit\u00e4t des Saarlandes, Saarbr\u00fccken, Germany.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Aggregation in Natural Language Generation: Another Look. Co-op work term report",
                "authors": [
                    {
                        "first": "J",
                        "middle": [],
                        "last": "Wilkinson",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "J. Wilkinson 1995 Aggregation in Natural Language Generation: Another Look. Co-op work term report, Department of Computer Science, University of Waterloo.",
                "links": null
            }
        },
        "ref_entries": {
            "FIGREF0": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "Stage 1 Pre-processing (RB): degraphing of the semantic representation retrieval of lexical information Stage 2 Flesh-out (ML): assignment of syntactic labels insertion of function words assignment of case and verb position features Stage 3 Conversion to syntactic tree (RB): introduction of syntactic representation for coordination splitting of separable prefix verbs based on both lexical information and previously assigned verb position features reversal of heads (e.g., in quantitative expressions) (ML) Stage 4 Movement: extraposition (ML) raising, wh movement (RB) Inflectional generation (RB)"
            },
            "FIGREF1": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "various linguistic features in the local context (parent node and grandparent node): number and person, definiteness, voice, mood, transitivity, presence of logical subject and object, presence of certain semantic attributes, coordination, prepositional relations \u2022 syntactic label \u2022 presence of modal verbs \u2022 prepositional relations \u2022 transitivity Features on the attachment site \u2022 presence of logical subject \u2022 status of the parent and grandparent as a separable prefix verb \u2022 voice and presence of modal verbs on the parent and grandparent \u2022 presence of arguments and transitivity features on the parent and grandparent \u2022 number, person and definiteness; the same on parent and grandparent \u2022 syntactic label; the same on the parent and grandparent \u2022 verb position; the same on the parent \u2022 prepositional relation on parent and grandparent \u2022 semantic relation that parent and grandparent have to their respective parent node"
            },
            "FIGREF2": {
                "uris": null,
                "type_str": "figure",
                "num": null,
                "text": "\u2022 semantic relation to the parent of the duplicated node, its parent and grandparent \u2022 part of speech of the duplicated node \u2022 verb position across the coordinated node \u2022 position of the duplicated node in premodifiers or postmodifiers of the parent \u2022 coordination of the duplicated node and the grandparent of the duplicated node \u2022 status of parent and grandparent as a proposition \u2022 number feature on the parent \u2022 transitivity and presence of a direct object on the parent"
            },
            "TABREF0": {
                "html": null,
                "content": "<table><tr><td>Value</td><td>Accuracy</td></tr><tr><td>Dat</td><td>0.8705</td></tr><tr><td>Acc</td><td>0.9707</td></tr><tr><td>Gen</td><td>0.9457</td></tr><tr><td>Nom</td><td>0.9654</td></tr><tr><td>overall</td><td>0.9352</td></tr></table>",
                "type_str": "table",
                "text": "Accuracy of the case assignment model.",
                "num": null
            },
            "TABREF1": {
                "html": null,
                "content": "<table><tr><td/><td>Value</td><td colspan=\"2\">Precision Recall F-measure</td></tr><tr><td/><td>Initial</td><td>0.9650</td><td>0.9809 0.9729</td></tr><tr><td/><td>Second</td><td>0.9754</td><td>0.9740 0.9743</td></tr><tr><td/><td>Final</td><td>0.9420</td><td>0.9749 0.9581</td></tr><tr><td/><td colspan=\"2\">Undefined 0.5868</td><td>0.3869 0.4663</td></tr><tr><td/><td>Overall</td><td>0.9491</td></tr><tr><td/><td>accuracy</td><td/></tr><tr><td>6</td><td colspan=\"2\">Extraposition</td></tr><tr><td colspan=\"4\">In both German and English it is possible to</td></tr><tr><td colspan=\"4\">extrapose clausal material to the right periphery of</td></tr><tr><td colspan=\"4\">the sentence (extraposed clauses underlined in the</td></tr><tr><td colspan=\"3\">examples below):</td></tr><tr><td colspan=\"4\">Relative clause extraposition:</td></tr><tr><td/><td colspan=\"3\">English: A man just left who had come to</td></tr><tr><td/><td colspan=\"2\">ask a question.</td></tr><tr><td/><td colspan=\"3\">German: Der Mann ist gerade</td></tr><tr><td/><td colspan=\"3\">weggegangen, der gekommen war, um</td></tr><tr><td/><td colspan=\"3\">eine Frage zu stellen.</td></tr><tr><td colspan=\"4\">Infinitival clause extraposition:</td></tr><tr><td/><td colspan=\"3\">English: A decision was made to leave the</td></tr><tr><td/><td colspan=\"2\">country.</td></tr><tr><td/><td colspan=\"3\">German: Eine Entscheidung wurde</td></tr><tr><td/><td colspan=\"3\">getroffen, das Land zu verlassen.</td></tr><tr><td colspan=\"4\">Complement clause extraposition:</td></tr><tr><td/><td colspan=\"3\">English: A rumour has been circulating</td></tr><tr><td/><td colspan=\"2\">that he is ill.</td></tr><tr><td/><td colspan=\"3\">German: Ein Ger\u00fccht ging um, dass er</td></tr><tr><td/><td colspan=\"2\">krank ist.</td></tr><tr><td/><td colspan=\"3\">Extraposition is not obligatory like other types</td></tr><tr><td colspan=\"4\">of movement (such as Wh-movement). 
Both</td></tr><tr><td colspan=\"4\">extraposed and non-extraposed versions of a</td></tr><tr><td colspan=\"4\">sentence are acceptable, with varying degrees of</td></tr><tr><td colspan=\"2\">fluency.</td><td/></tr></table>",
                "type_str": "table",
                "text": "Precision, recall, and F-measure for the verb position model.",
                "num": null
            },
            "TABREF2": {
                "html": null,
                "content": "<table><tr><td colspan=\"3\">Extraposable clause Accuracy Baseline</td></tr><tr><td>RELCL</td><td>0.8387</td><td>0.6093</td></tr><tr><td>INFCL</td><td>0.9202</td><td>0.9370</td></tr><tr><td>COMPCL</td><td>0.9857</td><td>0.9429</td></tr><tr><td>Overall</td><td>0.8612</td><td>0.6758</td></tr></table>",
                "type_str": "table",
                "text": "Accuracy of the extraposition model.",
                "num": null
            },
            "TABREF3": {
                "html": null,
                "content": "<table><tr><td>Value</td><td colspan=\"2\">Precision Recall F-measure</td></tr><tr><td>last</td><td>0.9191</td><td>0.9082 0.9136</td></tr><tr><td>first</td><td>0.9837</td><td>0.9867 0.9851</td></tr><tr><td>middle</td><td>0.0000</td><td>0.0000 0.0000</td></tr><tr><td>overall</td><td>0.9746</td><td/></tr><tr><td>accuracy</td><td/><td/></tr></table>",
                "type_str": "table",
                "text": "Precision, recall, and F-measure for the syntactic aggregation model.",
                "num": null
            }
        }
    }
}