File size: 98,236 Bytes
6fa4bc9
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
730
731
732
733
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748
749
750
751
752
753
754
755
756
757
758
759
760
761
762
763
764
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780
781
782
783
784
785
786
787
788
789
790
791
792
793
794
795
796
797
798
799
800
801
802
803
804
805
806
807
808
809
810
811
812
813
814
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
836
837
838
839
840
841
842
843
844
845
846
847
848
849
850
851
852
853
854
855
856
857
858
859
860
861
862
863
864
865
866
867
868
869
870
871
872
873
874
875
876
877
878
879
880
881
882
883
884
885
886
887
888
889
890
891
892
893
894
895
896
897
898
899
900
901
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924
925
926
927
928
929
930
931
932
933
934
935
936
937
938
939
940
941
942
943
944
945
946
947
948
949
950
951
952
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971
972
973
974
975
976
977
978
979
980
981
982
983
984
985
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004
1005
1006
1007
1008
1009
1010
1011
1012
1013
1014
1015
1016
1017
1018
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037
1038
1039
1040
1041
1042
1043
1044
1045
1046
1047
1048
1049
1050
1051
1052
1053
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093
1094
1095
1096
1097
1098
1099
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128
1129
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142
1143
1144
1145
1146
1147
1148
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165
1166
1167
1168
1169
1170
1171
1172
1173
1174
1175
1176
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193
1194
1195
1196
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
1230
1231
1232
1233
1234
1235
1236
1237
1238
1239
1240
1241
1242
1243
1244
1245
1246
1247
1248
1249
1250
1251
1252
1253
1254
1255
1256
1257
1258
1259
1260
1261
1262
1263
1264
1265
1266
1267
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
1286
1287
1288
1289
1290
1291
1292
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
1336
1337
1338
1339
1340
1341
1342
1343
1344
1345
1346
1347
1348
1349
1350
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
1365
1366
1367
1368
1369
1370
1371
1372
1373
1374
1375
1376
1377
1378
1379
1380
1381
1382
1383
1384
1385
1386
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400
1401
1402
1403
1404
1405
1406
1407
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424
1425
1426
1427
1428
1429
1430
1431
1432
1433
1434
1435
1436
1437
1438
1439
1440
1441
1442
1443
1444
1445
1446
1447
1448
1449
1450
1451
1452
1453
1454
1455
1456
1457
1458
1459
1460
1461
1462
1463
1464
1465
1466
1467
1468
1469
1470
1471
1472
1473
1474
1475
1476
1477
1478
1479
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502
1503
1504
1505
1506
1507
1508
1509
1510
1511
1512
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534
1535
1536
1537
1538
1539
1540
1541
1542
1543
1544
1545
1546
1547
1548
1549
1550
1551
1552
1553
1554
1555
1556
1557
1558
1559
1560
1561
1562
1563
1564
1565
1566
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576
1577
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591
1592
1593
1594
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
1615
1616
1617
1618
1619
1620
1621
1622
1623
1624
1625
1626
1627
1628
1629
1630
1631
1632
1633
1634
1635
1636
1637
1638
1639
1640
1641
1642
1643
1644
1645
1646
1647
1648
1649
1650
1651
1652
1653
1654
{
    "paper_id": "P10-1010",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T09:20:01.140294Z"
    },
    "title": "The Human Language Project: Building a Universal Corpus of the World's Languages",
    "authors": [
        {
            "first": "Steven",
            "middle": [],
            "last": "Abney",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Michigan",
                "location": {}
            },
            "email": "abney@umich.edu"
        },
        {
            "first": "Steven",
            "middle": [],
            "last": "Bird",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Pennsylvania",
                "location": {}
            },
            "email": "sbird@unimelb.edu.au"
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "We present a grand challenge to build a corpus that will include all of the world's languages, in a consistent structure that permits large-scale cross-linguistic processing, enabling the study of universal linguistics. The focal data types, bilingual texts and lexicons, relate each language to one of a set of reference languages. We propose that the ability to train systems to translate into and out of a given language be the yardstick for determining when we have successfully captured a language. We call on the computational linguistics community to begin work on this Universal Corpus, pursuing the many strands of activity described here, as their contribution to the global effort to document the world's linguistic heritage before more languages fall silent.",
    "pdf_parse": {
        "paper_id": "P10-1010",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "We present a grand challenge to build a corpus that will include all of the world's languages, in a consistent structure that permits large-scale cross-linguistic processing, enabling the study of universal linguistics. The focal data types, bilingual texts and lexicons, relate each language to one of a set of reference languages. We propose that the ability to train systems to translate into and out of a given language be the yardstick for determining when we have successfully captured a language. We call on the computational linguistics community to begin work on this Universal Corpus, pursuing the many strands of activity described here, as their contribution to the global effort to document the world's linguistic heritage before more languages fall silent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "The grand aim of linguistics is the construction of a universal theory of human language. To a computational linguist, it seems obvious that the first step is to collect significant amounts of primary data for a large variety of languages. Ideally, we would like a complete digitization of every human language: a Universal Corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "If we are ever to construct such a corpus, it must be now. With the current rate of language loss, we have only a small window of opportunity before the data is gone forever. Linguistics may be unique among the sciences in the crisis it faces. The next generation will forgive us for the most egregious shortcomings in theory construction and technology development, but they will not forgive us if we fail to preserve vanishing primary language data in a form that enables future research. The scope of the task is enormous. At present,",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "we have non-negligible quantities of machinereadable data for only about 20-30 of the world's 6,900 languages (Maxwell and Hughes, 2006) .",
                "cite_spans": [
                    {
                        "start": 110,
                        "end": 136,
                        "text": "(Maxwell and Hughes, 2006)",
                        "ref_id": "BIBREF14"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Linguistics as a field is awake to the crisis. There has been a tremendous upsurge of interest in documentary linguistics, the field concerned with the \"creation, annotation, preservation, and dissemination of transparent records of a language\" (Woodbury, 2010). However, documentary linguistics alone is not equal to the task. For example, no million-word machine-readable corpus exists for any endangered language, even though such a quantity would be necessary for wide-ranging investigation of the language once no speakers are available. The chances of constructing large-scale resources will be greatly improved if computational linguists contribute their expertise. This collaboration between linguists and computational linguists will extend beyond the construction of the Universal Corpus to its exploitation for both theoretical and technological ends. We envisage a new paradigm of universal linguistics, in which grammars of individual languages are built from the ground up, combining expert manual effort with the power tools of probabilistic language models and grammatical inference. A universal grammar captures redundancies which exist across languages, constituting a \"universal linguistic prior,\" and enabling us to identify the distinctive properties of specific languages and families. The linguistic prior and regularities due to common descent enable a new economy of scale for technology development: cross-linguistic triangulation can improve performance while reducing per-language data requirements.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Our aim in the present paper is to move beyond generalities to a concrete plan of attack, and to challenge the field to a communal effort to create a Universal Corpus of the world's languages, in consistent machine-readable format, permitting large-scale cross-linguistic processing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "2 Human Language Project",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Although language endangerment provides urgency, the corpus is not intended primarily as a Noah's Ark for languages. The aims go beyond the current crisis: we wish to support crosslinguistic research and technology development at the largest scale. There are existing collections that contain multiple languages, but it is rare to have consistent formats and annotation across languages, and few such datasets contain more than a dozen or so languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "If we think of a multi-lingual corpus as consisting of an array of items, with columns representing languages and rows representing resource types, the usual focus is on \"vertical\" processing. Our particular concern, by contrast, is \"horizontal\" processing that cuts indiscriminately across languages. Hence we require an unusual degree of consistency across languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "The kind of processing we wish to enable is much like the large-scale systematic research that motivated the Human Genome Project.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "One of the greatest impacts of having the sequence may well be in enabling an entirely new approach to biological research. In the past, researchers studied one or a few genes at a time. With whole-genome sequences . . . they can approach questions systematically and on a grand scale. They can study . . . how tens of thousands of genes and proteins work together in interconnected networks to orchestrate the chemistry of life. (Human Genome Project, 2007) We wish to make it possible to investigate human language equally systematically and on an equally grand scale: a Human Linguome Project, as it were, though we have chosen the \"Human Language Project\" as a more inviting title for the undertaking. The product is a Universal Corpus, 1 in two senses of universal: in the sense of including (ultimately) all the world's languages, and in the sense of enabling software and processing methods that are language-universal.",
                "cite_spans": [
                    {
                        "start": 430,
                        "end": 458,
                        "text": "(Human Genome Project, 2007)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "However, we do not aim for a collection that is universal in the sense of encompassing all language documentation efforts. Our goal is the construction of a specific resource, albeit a very large 1 http://universalcorpus.org/ resource. We contrast the proposed effort with general efforts to develop open resources, standards, and best practices. We do not aim to be allinclusive. The project does require large-scale collaboration, and a task definition that is simple and compelling enough to achieve buy-in from a large number of data providers. But we do not need and do not attempt to create consensus across the entire community. (Although one can hope that what proves successful for a project of this scale will provide a good foundation for future standards.)",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "Moreover, we do not aim to collect data merely in the vague hope that it will prove useful. Although we strive for maximum generality, we also propose a specific driving \"use case,\" namely, machine translation (MT), (Hutchins and Somers, 1992; Koehn, 2010) . The corpus provides a testing ground for the development of MT system-construction methods that are dramatically \"leaner\" in their resource requirements, and which take advantage of cross-linguistic bootstrapping. The large engineering question is how one can turn the size of the task-constructing MT systems for all the world's languages simultaneously-to one's advantage, and thereby consume dramatically less data per language.",
                "cite_spans": [
                    {
                        "start": 216,
                        "end": 243,
                        "text": "(Hutchins and Somers, 1992;",
                        "ref_id": "BIBREF11"
                    },
                    {
                        "start": 244,
                        "end": 256,
                        "text": "Koehn, 2010)",
                        "ref_id": "BIBREF12"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "The choice of MT as the use case is also driven by scientific considerations. To explain, we require a bit of preamble.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "We aim for a digitization of each human language. What exactly does it mean to digitize an entire language? It is natural to think in terms of replicating the body of resources available for well-documented languages, and the pre-eminent resource for any language is a treebank. Producing a treebank involves a staggering amount of manual effort. It is also notoriously difficult to obtain agreement about how parse trees should be defined in one language, much less in many languages simultaneously. The idea of producing treebanks for 6,900 languages is quixotic, to put it mildly. But is a treebank actually necessary?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "Let us suppose that the purpose of a parse tree is to mediate interpretation. A treebank, arguably, represents a theoretical hypothesis about how interpretations could be constructed; the primary data is actually the interpretations themselves. This suggests that we annotate sentences with representations of meanings instead of syntactic structures. Now that seems to take us out of the frying pan into the fire. If obtaining consen-sus on parse trees is difficult, obtaining consensus on meaning representations is impossible. However, if the language under consideration is anything other than English, then a translation into English (or some other reference language) is for most purposes a perfectly adequate meaning representation. That is, we view machine translation as an approximation to language understanding.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "Here is another way to put it. One measure of adequacy of a language digitization is the ability of a human-already fluent in a reference language-to acquire fluency in the digitized language using only archived material. Now it would be even better if we could use a language digitization to construct an artificial speaker of the language. Importantly, we do not need to solve the AI problem: the speaker need not decide what to say, only how to translate from meanings to sentences of the language, and from sentences back to meanings. Taking sentences in a reference language as the meaning representation, we arrive back at machine translation as the measure of success. In short, we have successfully captured a language if we can translate into and out of the language.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "The key resource that should be built for each language, then, is a collection of primary texts with translations into a reference language. \"Primary text\" includes both written documents and transcriptions of recordings. Large volumes of primary texts will be useful even without translation for such tasks as language modeling and unsupervised learning of morphology. Thus, we anticipate that the corpus will have the usual \"pyramidal\" structure, starting from a base layer of unannotated text, some portion of which is translated into a reference language at the document level to make the next layer. Note that, for maximally authentic primary texts, we assume the direction of translation will normally be from primary text to reference language, not the other way around.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "Another layer of the corpus consists of sentence and word alignments, required for training and evaluating machine translation systems, and for extracting bilingual lexicons. Curating such annotations is a more specialized task than translation, and so we expect it will only be done for a subset of the translated texts.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "In the last and smallest layer, morphology is annotated. This supports the development of morphological analyzers, to preprocess primary texts to identify morpheme boundaries and recognize allomorphs, reducing the amount of data required for training an MT system. This most-refined target annotation corresponds to the interlinear glossed texts that are the de facto standard of annotation in the documentary linguistics community.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "We postulate that interlinear glossed text is sufficiently fine-grained to serve our purposes. It invites efforts to enrich it by automatic means: for example, there has been work on parsing the English translations and using the word-by-word glosses to transfer the parse tree to the object language, effectively creating a treebank automatically (Xia and Lewis, 2007) . At the same time, we believe that interlinear glossed text is sufficiently simple and well-understood to allow rapid construction of resources, and to make cross-linguistic consistency a realistic goal.",
                "cite_spans": [
                    {
                        "start": 348,
                        "end": 369,
                        "text": "(Xia and Lewis, 2007)",
                        "ref_id": "BIBREF24"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "Each of these layers-primary text, translations, alignments, and morphological glosses-seems to be an unavoidable piece of the overall solution. The fact that these layers will exist in diminishing quantity is also unavoidable. However, there is an important consequence: the primary texts will be permanently subject to new translation initiatives, which themselves will be subject to new alignment and glossing initiatives, in which each step is an instance of semisupervised learning (Abney, 2007) . As time passes, our ability to enhance the quantity and quality of the annotations will only increase, thanks to effective combinations of automatic, professional, and crowd-sourced effort.",
                "cite_spans": [
                    {
                        "start": 487,
                        "end": 500,
                        "text": "(Abney, 2007)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Aims and scope",
                "sec_num": "2.1"
            },
            {
                "text": "The basic principles upon which the envisioned corpus is based are the following:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Universality. Covering as many languages as possible is the first priority. Progress will be gauged against concrete goals for numbers of languages, data per language, and coverage of language families (Whalen and Simons, 2009) .",
                "cite_spans": [
                    {
                        "start": 214,
                        "end": 227,
                        "text": "Simons, 2009)",
                        "ref_id": "BIBREF22"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Machine readability and consistency. \"Covering\" languages means enabling machine processing seamlessly across languages. This will support new types of linguistic inquiry and the development and testing of inference methods (for morphology, parsers, machine translation) across large numbers of typologically diverse languages.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Community effort. We cannot expect a single organization to assemble a resource on this scale. It will be necessary to get community buy-in, and many motivated volunteers. The repository will not be the sole possession of any one institution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Availability. The content of the corpus will be available under one or more permissive licenses, such as the Creative Commons Attribution License (CC-BY), placing as few limits as possible on community members' ability to obtain and enhance the corpus, and redistribute derivative data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Utility. The corpus aims to be maximally useful, and minimally parochial. Annotation will be as lightweight as possible; richer annotations will will emerge bottom-up as they prove their utility at the large scale.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "Centrality of primary data. Primary texts and recordings are paramount. Secondary resources such as grammars and lexicons are important, but no substitute for primary data. It is desirable that secondary resources be integrated with-if not derived from-primary data in the corpus.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Principles",
                "sec_num": "2.2"
            },
            {
                "text": "What should be included in the corpus? To some extent, data collection will be opportunistic, but it is appropriate to have a well-defined target in mind. We consider the following essential.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "Metadata. One means of resource identification is to survey existing documentation for the language, including bibliographic references and locations of web resources. Provenance and proper citation of sources should be included for all data.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "For written text. (1) Primary documents in original printed form, e.g. scanned page images or PDF. (2) Transcription. Not only optical character recognition output, but also the output of tools that extract text from PDF, will generally require manual editing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "For spoken text. (1) Audio recordings. Both elicited and spontaneous speech should be included. It is highly desirous to have some connected speech for every language. (2) Slow speech \"audio transcriptions.\" Carefully respeaking a spoken text can be much more efficient than written transcription, and may one day yield to speech recognition methods. (3) Written transcriptions. We do not impose any requirements on the form of transcription, though orthographic transcription is generally much faster to produce than phonetic transcription, and may even be more useful as words are represented by normalized forms.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "For both written and spoken text. (1) Translations of primary documents into a reference language (possibly including commentary).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "(2) Sentence-level segmentation and translation. (3) Word-level segmentation and glossing. (4) Morpheme-level segmentation and glossing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "All documents will be included in primary form, but the percentage of documents with manual annotation, or manually corrected annotation, decreases at increasingly fine-grained levels of annotation. Where manual fine-grained annotation is unavailable, automatic methods for creating it (at a lower quality) are desirable. Defining such methods for a large range of resource-poor languages is an interesting computational challenge.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "Secondary resources. Although it is possible to base descriptive analyses exclusively on a text corpus (Himmelmann, 2006, p. 22) , the following secondary resources should be secured if they are available: (1) A lexicon with glosses in a reference language. Ideally, everything should be attested in the texts, but as a practical matter, there will be words for which we have only a lexical entry and no instances of use. (2) Paradigms and phonology, for the construction of a morphological analyzer. Ideally, they should be inducible from the texts, but published grammatical information may go beyond what is attested in the text.",
                "cite_spans": [
                    {
                        "start": 103,
                        "end": 128,
                        "text": "(Himmelmann, 2006, p. 22)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "What to include",
                "sec_num": "2.3"
            },
            {
                "text": "Our key desideratum is support for automatic processing across a large range of languages. No data collection effort currently exists or is proposed, to our knowledge, that addresses this desideratum. Traditional language archives such as the Audio Archive of Linguistic Fieldwork (UC Berkeley), Documentation of Endangered Languages (Max Planck Institute, Nijmegen), the Endangered Languages Archive (SOAS, University of London), and the Pacific And Regional Archive for Digital Sources in Endangered Cultures (Australia) offer broad coverage of languages, but the majority of their offerings are restricted in availability and do not support machine processing. Conversely, large-scale data collection efforts by the Linguistic Data Consortium and the European Language Resources Association cover less than one percent of the world's languages, with no evident plans for major expansion of coverage. Other efforts concern the definition and aggregation of language resource metadata, including OLAC, IMDI, and CLARIN (Simons and Bird, 2003; Broeder and Wittenburg, 2006; V\u00e1radi et al., 2008) , but this is not the same as collecting and disseminating data.",
                "cite_spans": [
                    {
                        "start": 1020,
                        "end": 1043,
                        "text": "(Simons and Bird, 2003;",
                        "ref_id": "BIBREF17"
                    },
                    {
                        "start": 1044,
                        "end": 1073,
                        "text": "Broeder and Wittenburg, 2006;",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 1074,
                        "end": 1094,
                        "text": "V\u00e1radi et al., 2008)",
                        "ref_id": "BIBREF19"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Inadequacy of existing efforts",
                "sec_num": "2.4"
            },
            {
                "text": "Initiatives to develop standard formats for linguistic annotations are orthogonal to our goals. The success of the project will depend on contributed data from many sources, in many different formats. Converting all data formats to an official standard, such as the RDF-based models being developed by ISO Technical Committee 37 Sub-committee 4 Working Group 2, is simply impractical. These formats have onerous syntactic and semantic requirements that demand substantial further processing together with expert judgment, and threaten to crush the large-scale collaborative data collection effort we envisage, before it even gets off the ground. Instead, we opt for a very lightweight format, sketched in the next section, to minimize the effort of conversion and enable an immediate start. This does not limit the options of community members who desire richer formats, since they are free to invest the effort in enriching the existing data. Such enrichment efforts may gain broad support if they deliver a tangible benefit for cross-language processing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Inadequacy of existing efforts",
                "sec_num": "2.4"
            },
            {
                "text": "Here we sketch a simple approach to storage of texts (including transcribed speech), bitexts, interlinear glossed text, and lexicons. We have been deliberately schematic since the goal is just to give grounds for confidence that there exists a general, scalable solution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "For readability, our illustrations will include space-separated sequences of tokens. However, behind the scenes these could be represented as a sequence of pairs of start and end offsets into a primary text or speech signal, or as a sequence of integers that reference an array of strings. Thus, when we write (1a), bear in mind it may be implemented as (1b) or (1c).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "(1) a. This is a point of order . b. (0,4), (5,7), (8,9), (10,15), (16,18), . . . c. 9347, 3053, 0038, 3342, 3468, . . . In what follows, we focus on the minimal requirements for storing and disseminating aligned text, not the requirements for efficient in-memory data structures. Moreover, we are agnostic about whether the normalized, tokenized format is stored entire or computed on demand.",
                "cite_spans": [
                    {
                        "start": 67,
                        "end": 120,
                        "text": "(16,18), . . . c. 9347, 3053, 0038, 3342, 3468, . . .",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "We take an aligned text to be composed of a series of aligned sentences, each consisting of a small set of attributes and values, e.g.:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "ID: europarl/swedish/ep-00-01-17/18 LANGS: swd eng SENT: det g\u00e4ller en ordningsfr\u00e5ga TRANS: this is a point of order ALIGN: 1-1 2-2 3-3 4-4 4-5 4-6 PROVENANCE: pharaoh-v1.2, ... REV: 8947 2010-05-02 10:35:06 leobfld12 RIGHTS: Copyright (C) 2010 Uni...; CC-BY The value of ID identifies the document and sentence, and any collection to which the document belongs. Individual components of the identifier can be referenced or retrieved. The LANGS attribute identifies the source and reference language using ISO 639 codes. 2 The SENT attribute contains space-delimited tokens comprising a sentence. Optional attributes TRANS and ALIGN hold the translation and alignment, if these are available; they are omitted in monolingual text. A provenance attribute records any automatic or manual processes which apply to the record, and a revision attribute contains the version number, timestamp, and username associated with the most recent modification of the record, and a rights attribute contains copyright and license information.",
                "cite_spans": [
                    {
                        "start": 521,
                        "end": 522,
                        "text": "2",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "When morphological annotation is available, it is represented by two additional attributes, LEX and AFF. Here is a monolingual example:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "ID: example/001 LANGS: eng SENT: the dogs are barking LEX: the dog be bark AFF: -PL PL ING Note that combining all attributes of these two examples-that is, combining word-by-word translation with morphological analysis-yields interlinear glossed text.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "A bilingual lexicon is an indispensable resource, whether provided as such, induced from a collection of aligned text, or created by merging contributed and induced lexicons. A bilingual lexicon can be viewed as an inventory of cross-language correspondences between words or groups of words. These correspondences are just aligned text fragments, albeit much smaller than a sentence. Thus, we take a bilingual lexicon to be a kind of text in which each record contains a single lexeme and its translation, represented using the LEX and TRANS attributes we have already introduced, e.g.:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "ID: swedishlex/v3.2/0419 LANGS: swd eng LEX: ordningsfr\u00e5ga TRANS: point of order",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "In sum, the Universal Corpus is represented as a massive store of records, each representing a single sentence or lexical entry, using a limited set of attributes. The store is indexed for efficient access, and supports access to slices identified by language, content, provenance, rights, and so forth. Many component collections would be \"unioned\" into this single, large Corpus, with only the record identifiers capturing the distinction between the various data sources.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "Special cases of aligned text and wordlists, spanning more than 1,000 languages, are Bible translations and Swadesh wordlists (Resnik et al., 1999; Swadesh, 1955) . Here there are obvious use-cases for accessing a particular verse or word across all languages. However, it is not necessary to model n-way language alignments. Instead, such sources are implicitly aligned by virtue of their structure. Extracting all translations of a verse, or all cognates of a Swadesh wordlist item, is an index operation that returns monolingual records, e.g.:",
                "cite_spans": [
                    {
                        "start": 126,
                        "end": 147,
                        "text": "(Resnik et al., 1999;",
                        "ref_id": "BIBREF15"
                    },
                    {
                        "start": 148,
                        "end": 162,
                        "text": "Swadesh, 1955)",
                        "ref_id": "BIBREF18"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "ID: swadesh/47 ID: swadesh/47 LANGS: fra LANGS: eng LEX: chien LEX: dog",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A Simple Storage Model",
                "sec_num": "3"
            },
            {
                "text": "Data collection on this scale is a daunting prospect, yet it is important to avoid the paralysis of over-planning. We can start immediately by leveraging existing infrastructure, and the voluntary effort of interested members of the language resources community. One possibility is to found a \"Language Commons,\" an open access repository of language resources hosted in the Internet Archive, with a lightweight method for community members to contribute data sets. A fully processed and indexed version of selected data can be made accessible via a web services interface to a major cloud storage facility, such as Amazon Web Services. A common query interface could be supported via APIs in multiple NLP toolkits such as NLTK and GATE (Bird et al., 2009; Cunningham et al., 2002) , and also in generic frameworks such as UIMA and SOAP, leaving developers to work within their preferred environment.",
                "cite_spans": [
                    {
                        "start": 737,
                        "end": 756,
                        "text": "(Bird et al., 2009;",
                        "ref_id": "BIBREF1"
                    },
                    {
                        "start": 757,
                        "end": 781,
                        "text": "Cunningham et al., 2002)",
                        "ref_id": "BIBREF6"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Building the Corpus",
                "sec_num": "4"
            },
            {
                "text": "We hope that potential contributors of data will be motivated to participate primarily by agreement with the goals of the project. Even someone who has specialized in a particular language or language family maintains an interest, we expect, in the universal question-the exploration of Language writ large.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation for data providers",
                "sec_num": "4.1"
            },
            {
                "text": "Data providers will find benefit in the availability of volunteers for crowd-sourcing, and tools for (semi-)automated quality control, refinement, and presentation of data. For example, a data holder should be able to contribute recordings and get help in transcribing them, through a combination of volunteer labor and automatic processing.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation for data providers",
                "sec_num": "4.1"
            },
            {
                "text": "Documentary linguists and computational linguists have much to gain from collaboration. In return for the data that documentary linguistics can provide, computational linguistics has the potential to revolutionize the tools and practice of language documentation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation for data providers",
                "sec_num": "4.1"
            },
            {
                "text": "We also seek collaboration with communities of language speakers. The corpus provides an economy of scale for the development of literacy materials and tools for interactive language instruction, in support of language preservation and revitalization. For small languages, literacy in the mother tongue is often defended on the grounds that it provides the best route to literacy in the national language (Wagner, 1993, ch. 8 ). An essential ingredient of any local literacy program is to have a substantial quantity of available texts that represent familiar topics including cultural heritage, folklore, personal narratives, and current events. Transition to literacy in a language of wider communication is aided when transitional materials are available (Waters, 1998, pp. 61ff) . Mutual benefits will also flow from the development of tools for low-cost publication and broadcast in the language, with copies of the published or broadcast material licensed to and archived in the corpus.",
                "cite_spans": [
                    {
                        "start": 405,
                        "end": 425,
                        "text": "(Wagner, 1993, ch. 8",
                        "ref_id": null
                    },
                    {
                        "start": 758,
                        "end": 782,
                        "text": "(Waters, 1998, pp. 61ff)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Motivation for data providers",
                "sec_num": "4.1"
            },
            {
                "text": "The enterprise requires collaboration of many individuals and groups, in a variety of roles.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Editors. A critical group are people with sufficient engagement to serve as editors for particular language families, who have access to data or are able to negotiate redistribution rights, and oversee the workflow of transcription, translation, and annotation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "CL Research. All manual annotation steps need to be automated. Each step presents a challenging semi-supervised learning and cross-linguistic bootstrapping problem. In addition, the overall measure of success-induction of machine translation systems from limited resources-pushes the state of the art (Kumar et al., 2007) . Numerous other CL problems arise: active learning to improve the quality of alignments and bilingual lexicons; automatic language identification for lowdensity languages; and morphology learning.",
                "cite_spans": [
                    {
                        "start": 301,
                        "end": 321,
                        "text": "(Kumar et al., 2007)",
                        "ref_id": "BIBREF13"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Tool builders. We need tools for annotation, format conversion, spidering and language identification, search, archiving, and presentation. Innovative crowd-sourcing solutions are of particular interest, e.g. web-based functionality for transcribing audio and video of oral literature, or setting up a translation service based on aligned texts for a low-density language, and collecting the improved translations suggested by users.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Volunteer annotators. An important reason for keeping the data model as lightweight as possible is to enable contributions from volunteers with little or no linguistic training. Two models are the volunteers who scan documents and correct OCR output in Project Gutenberg, or the undergraduate volunteers who have constructed Greek and Latin treebanks within Project Perseus (Crane, 2010). Bilingual lexicons that have been extracted from aligned text collections might be corrected using crowd-sourcing, leading to improved translation models and improved alignments. We also see the Universal Corpus as an excellent opportunity for undergraduates to participate in research, and for native speakers to participate in the preservation of their language. Documentary linguists. The collection protocol known as Basic Oral Language Documentation (BOLD) enables documentary linguists to collect 2-3 orders of magnitude more oral discourse than before (Bird, 2010) . Linguists can equip local speakers to collect written texts, then to carefully \"respeak\" and orally translate the texts into a reference language. With suitable tools, incorporating active learning, local speakers could further curate bilingual texts and lexicons. An early need is pilot studies to determine costings for different categories of language.",
                "cite_spans": [
                    {
                        "start": 948,
                        "end": 960,
                        "text": "(Bird, 2010)",
                        "ref_id": "BIBREF2"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Data agencies. The LDC and ELRA have a central role to play, given their track record in obtaining, curating, and publishing data with licenses that facilitate language technology development. We need to identify key resources where negotiation with the original data provider, and where payment of all preparation costs plus compensation for lost revenue, leads to new material for the Corpus. This is a new publication model and a new business model, but it can co-exist with the existing models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Language archives. Language archives have a special role to play as holders of unique materials. They could contribute existing data in its native format, for other participants to process. They could give bilingual texts a distinct status within their collections, to facilitate discovery.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Funding agencies. To be successful, the Human Language Project would require substantial funds, possibly drawing on a constellation of public and private agencies in many countries. However, in the spirit of starting small, and starting now, agencies could require that sponsored projects which collect texts and build lexicons contribute them to the Language Commons. After all, the most effective time to do translation, alignment, and lexicon work is often at the point when primary data is first collected, and this extra work promises direct benefits to the individual project.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Roles",
                "sec_num": "4.2"
            },
            {
                "text": "Seed corpus. The central challenge, we believe, is getting critical mass. Data attracts data, and if one can establish a sufficient seed, the effort will snowball. We can make some concrete proposals as to how to collect a seed. Language resources on the web are one source-the Cr\u00fabad\u00e1n project has identified resources for 400 languages, for example (Scannell, 2008) ; the New Testament of the Bible exists in about 1200 languages and contains of the order of 100k words. We hope that existing efforts that are already well-disposed toward electronic distribution will participate. We particularly mention the Language and Culture Archive of the Summer Institute of Linguistics, and the Rosetta Project. The latter is already distributed through the Internet Archive and contains material for 2500 languages.",
                "cite_spans": [
                    {
                        "start": 351,
                        "end": 367,
                        "text": "(Scannell, 2008)",
                        "ref_id": "BIBREF16"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Resource discovery. Existing language resources need to be documented, a large un-dertaking that depends on widely distributed knowledge. Existing published corpora from the LDC, ELRA and dozens of other sources-a total of 85,000 items-are already documented in the combined catalog of the Open Language Archives Community, 3 so there is no need to recreate this information. Other resources can be logged by community members using a public access wiki, with a metadata template to ensure key fields are elicited such as resource owner, license, ISO 639 language code(s), and data type. This information can itself be curated and stored in the form of an OLAC archive, to permit search over the union of the existing and newly documented items. Work along these lines has already been initiated by LDC and ELRA (Cieri et al., 2010) .",
                "cite_spans": [
                    {
                        "start": 812,
                        "end": 832,
                        "text": "(Cieri et al., 2010)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Resource classification. Editors with knowledge of particular language families will categorize documented resources relative to the needs of the project, using controlled vocabularies. This involves examining a resource, determining the granularity and provenance of the segmentation and alignment, checking its ISO 639 classifications, assigning it to a logarithmic size category, documenting its format and layout, collecting sample files, and assigning a priority score.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Acquisition. Where necessary, permission will be sought to lodge the resource in the repository. Funding may be required to buy the rights to the resource from its owner, as compensation for lost revenue from future data sales. Funding may be required to translate the source into a reference language. The repository's ingestion process is followed, and the resource metadata is updated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Text collection. Languages for which the available resources are inadequate are identified, and the needs are prioritized, based on linguistic and geographical diversity. Sponsorship is sought for collecting bilingual texts in high priority languages. Workflows are developed for languages based on a variety of factors, such as availability of educated people with native-level proficiency in their mother tongue and good knowledge of a reference language, internet access in the language area, availability of expatriate speakers in a first-world context, and so forth. A classification scheme is required to help predict which workflows will be most successful in a given situation.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Audio protocol. The challenge posed by languages with no written literature should not be underestimated. A promising collection method is Basic Oral Language Documentation, which calls for inexpensive voice recorders and netbooks, project-specific software for transcription and sentence-aligned translation, network bandwidth for upload to the repository, and suitable training and support throughout the process.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "Corpus readers. Software developers will inspect the file formats and identify high priority formats based on information about resource priorities and sizes. They will code a corpus reader, an open source reference implementation for converting between corpus formats and the storage model presented in section 3.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Early tasks",
                "sec_num": "4.3"
            },
            {
                "text": "There are many additional difficulties that could be listed, though we expect they can be addressed over time, once a sufficient seed corpus is established. Two particular issues deserve further comment, however.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Further challenges",
                "sec_num": "4.4"
            },
            {
                "text": "Licenses. Intellectual property issues surrounding linguistic corpora present a complex and evolving landscape (DiPersio, 2010). For users, it would be ideal for all materials to be available under a single license that permits derivative works, commercial use, and redistribution, such as the Creative Commons Attribution License (CC-BY). There would be no confusion about permissible uses of subsets and aggregates of the collected corpora, and it would be easy to view the Universal Corpus as a single corpus. But to attract as many data contributors as possible, we cannot make such a license a condition of contribution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Further challenges",
                "sec_num": "4.4"
            },
            {
                "text": "Instead, we propose to distinguish between: (1) a digital Archive of contributed corpora that are stored in their original format and made available under a range of licenses, offering preservation and dissemination services to the language resources community at large (i.e. the Language Commons); and (2) the Universal Corpus, which is embodied as programmatic access to an evolving subset of materials from the archive under one of a small set of permissive licenses, licenses whose unions and intersections are understood (e.g. CC-BY and its non-commercial counterpart CC-BY-NC). Apart from being a useful service in its own right, the Archive would provide a staging ground for the Universal Corpus. Archived corpora having restrictive licenses could be evaluated for their potential as contributions to the Corpus, making it possible to prioritize the work of negotiating more liberal licenses.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Further challenges",
                "sec_num": "4.4"
            },
            {
                "text": "There are reasons to distinguish Archive and Corpus even beyond the license issues. The Corpus, but not the Archive, is limited to the formats that support automatic cross-linguistic processing. Conversely, since the primary interface to the Corpus is programmatic, it may include materials that are hosted in many different archives; it only needs to know how to access and deliver them to the user. Incidentally, we consider it an implementation issue whether the Corpus is provided as a web service, a download service with user-side software, user-side software with data delivered on physical media, or a cloud application with user programs executed server-side.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Further challenges",
                "sec_num": "4.4"
            },
            {
                "text": "Expenses of conversion and editing. We do not trivialize the work involved in converting documents to the formats of section 3, and in manually correcting the results of noisy automatic processes such as optical character recognition. Indeed, the amount of work involved is one motivation for the lengths to which we have gone to keep the data format simple. For example, we have deliberately avoided specifying any particular tokenization scheme. Variation will arise as a consequence, but we believe that it will be no worse than the variability in input that current machine translation training methods routinely deal with, and will not greatly injure the utility of the Corpus. The utter simplicity of the formats also widens the pool of potential volunteers for doing the manual work that is required. By avoiding linguistically delicate annotation, we can take advantage of motivated but untrained volunteers such as students and members of speaker communities.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Further challenges",
                "sec_num": "4.4"
            },
            {
                "text": "Nearly twenty years ago, the linguistics community received a wake-up call, when Hale et al. (1992) predicted that 90% of the world's linguistic diversity would be lost or moribund by the year 2100, and warned that linguistics might \"go down in history as the only science that presided obliviously over the disappearance of 90 per cent of the very field to which it is dedicated.\" Today, language documentation is a high priority in mainstream linguistics. However, the field of computa-tional linguistics is yet to participate substantially.",
                "cite_spans": [
                    {
                        "start": 81,
                        "end": 99,
                        "text": "Hale et al. (1992)",
                        "ref_id": "BIBREF8"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "The first half century of research in computational linguistics-from circa 1960 up to the present-has touched on less than 1% of the world's languages. For a field which is justly proud of its empirical methods, it is time to apply those methods to the remaining 99% of languages. We will never have the luxury of richly annotated data for these languages, so we are forced to ask ourselves: can we do more with less?",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "We believe the answer is \"yes,\" and so we challenge the computational linguistics community to adopt a scalable computational approach to the problem. We need leaner methods for building machine translation systems; new algorithms for cross-linguistic bootstrapping via multiple paths; more effective techniques for leveraging human effort in labeling data; scalable ways to get bilingual text for unwritten languages; and large scale social engineering to make it all happen quickly.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "To believe we can build this Universal Corpus is certainly audacious, but not to even try is arguably irresponsible. The initial step parallels earlier efforts to create large machine-readable text collections which began in the 1960s and reverberated through each subsequent decade. Collecting bilingual texts is an orthodox activity, and many alternative conceptions of a Human Language Project would likely include this as an early task.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "The undertaking ranks with the largest datacollection efforts in science today. It is not achievable without considerable computational sophistication and the full engagement of the field of computational linguistics. Yet we require no fundamentally new technologies. We can build on our strengths in corpus-based methods, linguistic models, human-and machine-supplied annotations, and learning algorithms. By rising to this, the greatest language challenge of our time, we enable multi-lingual technology development at a new scale, and simultaneously lay the foundations for a new science of empirical universal linguistics.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "5"
            },
            {
                "text": "http://www.sil.org/iso639-3/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "http://www.language-archives.org/",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "We are grateful to Ed Bice, Doug Oard, Gary Simons, participants of the Language Commons working group meeting in Boston, students in the \"Digitizing Languages\" seminar (University of Michigan), and anonymous reviewers, for feedback on an earlier version of this paper.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgments",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Semisupervised Learning for Computational Linguistics",
                "authors": [
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Abney",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Steven Abney. 2007. Semisupervised Learning for Computational Linguistics. Chapman & Hall/CRC.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Natural Language Processing with Python. O'Reilly Media",
                "authors": [
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Bird",
                        "suffix": ""
                    },
                    {
                        "first": "Ewan",
                        "middle": [],
                        "last": "Klein",
                        "suffix": ""
                    },
                    {
                        "first": "Edward",
                        "middle": [],
                        "last": "Loper",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Steven Bird, Ewan Klein, and Edward Loper. 2009. Natural Language Processing with Python. O'Reilly Media. http://nltk.org/book.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "A scalable method for preserving oral literature from small languages",
                "authors": [
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Bird",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the 12th International Conference on Asia-Pacific Digital Libraries",
                "volume": "",
                "issue": "",
                "pages": "5--14",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Steven Bird. 2010. A scalable method for preserving oral literature from small languages. In Proceedings of the 12th International Conference on Asia-Pacific Digital Libraries, pages 5-14.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "The IMDI metadata framework, its current application and future direction",
                "authors": [
                    {
                        "first": "Daan",
                        "middle": [],
                        "last": "Broeder",
                        "suffix": ""
                    },
                    {
                        "first": "Peter",
                        "middle": [],
                        "last": "Wittenburg",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "International Journal of Metadata, Semantics and Ontologies",
                "volume": "1",
                "issue": "",
                "pages": "119--132",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daan Broeder and Peter Wittenburg. 2006. The IMDI metadata framework, its current application and fu- ture direction. International Journal of Metadata, Semantics and Ontologies, 1:119-132.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "A road map for interoperable language resource metadata",
                "authors": [
                    {
                        "first": "Christopher",
                        "middle": [],
                        "last": "Cieri",
                        "suffix": ""
                    },
                    {
                        "first": "Khalid",
                        "middle": [],
                        "last": "Choukri",
                        "suffix": ""
                    },
                    {
                        "first": "D",
                        "middle": [
                            "Terence"
                        ],
                        "last": "Nicoletta Calzolari",
                        "suffix": ""
                    },
                    {
                        "first": "Johannes",
                        "middle": [],
                        "last": "Langendoen",
                        "suffix": ""
                    },
                    {
                        "first": "Martha",
                        "middle": [],
                        "last": "Leveling",
                        "suffix": ""
                    },
                    {
                        "first": "Nancy",
                        "middle": [],
                        "last": "Palmer",
                        "suffix": ""
                    },
                    {
                        "first": "James",
                        "middle": [],
                        "last": "Ide",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Pustejovsky",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "Proceedings of the 7th International Conference on Language Resources and Evaluation (LREC)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Christopher Cieri, Khalid Choukri, Nicoletta Calzo- lari, D. Terence Langendoen, Johannes Leveling, Martha Palmer, Nancy Ide, and James Pustejovsky. 2010. A road map for interoperable language re- source metadata. In Proceedings of the 7th Interna- tional Conference on Language Resources and Eval- uation (LREC).",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Perseus Digital Library",
                "authors": [
                    {
                        "first": "R",
                        "middle": [],
                        "last": "Gregory",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Crane",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gregory R. Crane. 2010. Perseus Digital Library: Research in 2008/09. http://www.perseus. tufts.edu/hopper/research/current. Accessed Feb. 2010.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "GATE: an architecture for development of robust HLT applications",
                "authors": [
                    {
                        "first": "Hamish",
                        "middle": [],
                        "last": "Cunningham",
                        "suffix": ""
                    },
                    {
                        "first": "Diana",
                        "middle": [],
                        "last": "Maynard",
                        "suffix": ""
                    },
                    {
                        "first": "Kalina",
                        "middle": [],
                        "last": "Bontcheva",
                        "suffix": ""
                    },
                    {
                        "first": "Valentin",
                        "middle": [],
                        "last": "Tablan",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Proceedings of 40th Annual Meeting of the Association for Computational Linguistics",
                "volume": "",
                "issue": "",
                "pages": "168--175",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hamish Cunningham, Diana Maynard, Kalina Bontcheva, and Valentin Tablan. 2002. GATE: an architecture for development of robust HLT appli- cations. In Proceedings of 40th Annual Meeting of the Association for Computational Linguistics, pages 168-175. Association for Computational Linguistics.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Implications of a permissions culture on the development and distribution of language resources",
                "authors": [
                    {
                        "first": "Denise",
                        "middle": [],
                        "last": "Dipersio",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "FLaReNet Forum 2010. Fostering Language Resources Network",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Denise DiPersio. 2010. Implications of a permis- sions culture on the development and distribution of language resources. In FLaReNet Forum 2010. Fostering Language Resources Network. http: //www.flarenet.eu/.",
                "links": null
            },
            "BIBREF8": {
                "ref_id": "b8",
                "title": "Endangered languages. Language",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Hale",
                        "suffix": ""
                    },
                    {
                        "first": "L",
                        "middle": [],
                        "last": "Krauss",
                        "suffix": ""
                    },
                    {
                        "first": "A",
                        "middle": [],
                        "last": "Watahomigie",
                        "suffix": ""
                    },
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Yamamoto",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Craig",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "",
                "volume": "68",
                "issue": "",
                "pages": "1--42",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Hale, M. Krauss, L. Watahomigie, A. Yamamoto, and C. Craig. 1992. Endangered languages. Language, 68(1):1-42.",
                "links": null
            },
            "BIBREF9": {
                "ref_id": "b9",
                "title": "Language documentation: What is it and what is it good for?",
                "authors": [
                    {
                        "first": "P",
                        "middle": [],
                        "last": "Nikolaus",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Himmelmann",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Jost Gippert, Nikolaus Himmelmann, and Ulrike Mosel",
                "volume": "",
                "issue": "",
                "pages": "1--30",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Nikolaus P. Himmelmann. 2006. Language documen- tation: What is it and what is it good for? In Jost Gippert, Nikolaus Himmelmann, and Ulrike Mosel, editors, Essentials of Language Documenta- tion, pages 1-30. Mouton de Gruyter.",
                "links": null
            },
            "BIBREF10": {
                "ref_id": "b10",
                "title": "The science behind the Human Genome Project",
                "authors": [],
                "year": 2007,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Human Genome Project. 2007. The science behind the Human Genome Project. http: //www.ornl.gov/sci/techresources/ Human_Genome/project/info.shtml. Accessed Dec. 2007.",
                "links": null
            },
            "BIBREF11": {
                "ref_id": "b11",
                "title": "An Introduction to Machine Translation",
                "authors": [
                    {
                        "first": "W",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    },
                    {
                        "first": "John",
                        "middle": [],
                        "last": "Hutchins",
                        "suffix": ""
                    },
                    {
                        "first": "Harold",
                        "middle": [
                            "L"
                        ],
                        "last": "Somers",
                        "suffix": ""
                    }
                ],
                "year": 1992,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "W. John Hutchins and Harold L. Somers. 1992. An In- troduction to Machine Translation. Academic Press.",
                "links": null
            },
            "BIBREF12": {
                "ref_id": "b12",
                "title": "Statistical Machine Translation",
                "authors": [
                    {
                        "first": "Philipp",
                        "middle": [],
                        "last": "Koehn",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Philipp Koehn. 2010. Statistical Machine Translation. Cambridge University Press.",
                "links": null
            },
            "BIBREF13": {
                "ref_id": "b13",
                "title": "Improving word alignment with bridge languages",
                "authors": [
                    {
                        "first": "Shankar",
                        "middle": [],
                        "last": "Kumar",
                        "suffix": ""
                    },
                    {
                        "first": "Franz",
                        "middle": [
                            "J"
                        ],
                        "last": "Och",
                        "suffix": ""
                    },
                    {
                        "first": "Wolfgang",
                        "middle": [],
                        "last": "Macherey",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Language Processing and Computational Natural Language Learning (EMNLP-CoNLL)",
                "volume": "",
                "issue": "",
                "pages": "42--50",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Shankar Kumar, Franz J. Och, and Wolfgang Macherey. 2007. Improving word alignment with bridge languages. In Proceedings of the 2007 Joint Conference on Empirical Methods in Natural Lan- guage Processing and Computational Natural Lan- guage Learning (EMNLP-CoNLL), pages 42-50, Prague, Czech Republic. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF14": {
                "ref_id": "b14",
                "title": "Frontiers in linguistic annotation for lower-density languages",
                "authors": [
                    {
                        "first": "Mike",
                        "middle": [],
                        "last": "Maxwell",
                        "suffix": ""
                    },
                    {
                        "first": "Baden",
                        "middle": [],
                        "last": "Hughes",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Proceedings of the Workshop on Frontiers in Linguistically Annotated Corpora",
                "volume": "",
                "issue": "",
                "pages": "29--37",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Mike Maxwell and Baden Hughes. 2006. Frontiers in linguistic annotation for lower-density languages. In Proceedings of the Workshop on Frontiers in Lin- guistically Annotated Corpora 2006, pages 29-37, Sydney, Australia, July. Association for Computa- tional Linguistics.",
                "links": null
            },
            "BIBREF15": {
                "ref_id": "b15",
                "title": "The Bible as a parallel corpus: Annotating the 'book of 2000 tongues'. Computers and the Humanities",
                "authors": [
                    {
                        "first": "Philip",
                        "middle": [],
                        "last": "Resnik",
                        "suffix": ""
                    },
                    {
                        "first": "Mari",
                        "middle": [
                            "Broman"
                        ],
                        "last": "Olsen",
                        "suffix": ""
                    },
                    {
                        "first": "Mona",
                        "middle": [],
                        "last": "Diab",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "33",
                "issue": "",
                "pages": "129--153",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Philip Resnik, Mari Broman Olsen, and Mona Diab. 1999. The Bible as a parallel corpus: Annotating the 'book of 2000 tongues'. Computers and the Hu- manities, 33:129-153.",
                "links": null
            },
            "BIBREF16": {
                "ref_id": "b16",
                "title": "The Cr\u00fabad\u00e1n Project: Corpus building for under-resourced languages",
                "authors": [
                    {
                        "first": "Kevin",
                        "middle": [],
                        "last": "Scannell",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Cahiers du Cental 5: Proceedings of the 3rd Web as Corpus Workshop",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Kevin Scannell. 2008. The Cr\u00fabad\u00e1n Project: Corpus building for under-resourced languages. In Cahiers du Cental 5: Proceedings of the 3rd Web as Corpus Workshop.",
                "links": null
            },
            "BIBREF17": {
                "ref_id": "b17",
                "title": "The Open Language Archives Community: An infrastructure for distributed archiving of language resources. Literary and Linguistic Computing",
                "authors": [
                    {
                        "first": "Gary",
                        "middle": [],
                        "last": "Simons",
                        "suffix": ""
                    },
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Bird",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "",
                "volume": "18",
                "issue": "",
                "pages": "117--128",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Gary Simons and Steven Bird. 2003. The Open Lan- guage Archives Community: An infrastructure for distributed archiving of language resources. Liter- ary and Linguistic Computing, 18:117-128.",
                "links": null
            },
            "BIBREF18": {
                "ref_id": "b18",
                "title": "Towards greater accuracy in lexicostatistic dating",
                "authors": [
                    {
                        "first": "Morris",
                        "middle": [],
                        "last": "Swadesh",
                        "suffix": ""
                    }
                ],
                "year": 1955,
                "venue": "International Journal of American Linguistics",
                "volume": "21",
                "issue": "",
                "pages": "121--137",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Morris Swadesh. 1955. Towards greater accuracy in lexicostatistic dating. International Journal of American Linguistics, 21:121-137.",
                "links": null
            },
            "BIBREF19": {
                "ref_id": "b19",
                "title": "CLARIN: common language resources and technology infrastructure",
                "authors": [
                    {
                        "first": "Tam\u00e1s",
                        "middle": [],
                        "last": "V\u00e1radi",
                        "suffix": ""
                    },
                    {
                        "first": "Steven",
                        "middle": [],
                        "last": "Krauwer",
                        "suffix": ""
                    },
                    {
                        "first": "Peter",
                        "middle": [],
                        "last": "Wittenburg",
                        "suffix": ""
                    },
                    {
                        "first": "Martin",
                        "middle": [],
                        "last": "Wynne",
                        "suffix": ""
                    },
                    {
                        "first": "Kimmo",
                        "middle": [],
                        "last": "Koskenniemi",
                        "suffix": ""
                    }
                ],
                "year": 2008,
                "venue": "Proceedings of the Sixth International Language Resources and Evaluation Conference. European Language Resources Association",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Tam\u00e1s V\u00e1radi, Steven Krauwer, Peter Wittenburg, Martin Wynne, and Kimmo Koskenniemi. 2008. CLARIN: common language resources and technol- ogy infrastructure. In Proceedings of the Sixth Inter- national Language Resources and Evaluation Con- ference. European Language Resources Association.",
                "links": null
            },
            "BIBREF20": {
                "ref_id": "b20",
                "title": "Literacy, Culture, and Development: Becoming Literate in Morocco",
                "authors": [
                    {
                        "first": "Daniel",
                        "middle": [
                            "A"
                        ],
                        "last": "Wagner",
                        "suffix": ""
                    }
                ],
                "year": 1993,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Daniel A. Wagner. 1993. Literacy, Culture, and Devel- opment: Becoming Literate in Morocco. Cambridge University Press.",
                "links": null
            },
            "BIBREF21": {
                "ref_id": "b21",
                "title": "Local Literacies: Theory and Practice",
                "authors": [
                    {
                        "first": "Glenys",
                        "middle": [],
                        "last": "Waters",
                        "suffix": ""
                    }
                ],
                "year": 1998,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Glenys Waters. 1998. Local Literacies: Theory and Practice. Summer Institute of Linguistics, Dallas.",
                "links": null
            },
            "BIBREF22": {
                "ref_id": "b22",
                "title": "Endangered language families",
                "authors": [
                    {
                        "first": "H",
                        "middle": [],
                        "last": "Douglas",
                        "suffix": ""
                    },
                    {
                        "first": "Gary",
                        "middle": [],
                        "last": "Whalen",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Simons",
                        "suffix": ""
                    }
                ],
                "year": 2009,
                "venue": "Proceedings of the 1st International Conference on Language Documentation and Conservation. University of Hawaii",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Douglas H. Whalen and Gary Simons. 2009. En- dangered language families. In Proceedings of the 1st International Conference on Language Docu- mentation and Conservation. University of Hawaii. http://hdl.handle.net/10125/5017.",
                "links": null
            },
            "BIBREF23": {
                "ref_id": "b23",
                "title": "The Cambridge Handbook of Endangered Languages",
                "authors": [
                    {
                        "first": "C",
                        "middle": [],
                        "last": "Anthony",
                        "suffix": ""
                    },
                    {
                        "first": "",
                        "middle": [],
                        "last": "Woodbury",
                        "suffix": ""
                    }
                ],
                "year": 2010,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Anthony C. Woodbury. 2010. Language documenta- tion. In Peter K. Austin and Julia Sallabank, edi- tors, The Cambridge Handbook of Endangered Lan- guages. Cambridge University Press.",
                "links": null
            },
            "BIBREF24": {
                "ref_id": "b24",
                "title": "Multilingual structural projection across interlinearized text",
                "authors": [
                    {
                        "first": "Fei",
                        "middle": [],
                        "last": "Xia",
                        "suffix": ""
                    },
                    {
                        "first": "William",
                        "middle": [
                            "D"
                        ],
                        "last": "Lewis",
                        "suffix": ""
                    }
                ],
                "year": 2007,
                "venue": "Proceedings of the Meeting of the North American Chapter of the Association for Computational Linguistics (NAACL)",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Fei Xia and William D. Lewis. 2007. Multilingual structural projection across interlinearized text. In Proceedings of the Meeting of the North American Chapter of the Association for Computational Lin- guistics (NAACL). Association for Computational Linguistics.",
                "links": null
            }
        },
        "ref_entries": {}
    }
}