{
    "paper_id": "U07-1005",
    "header": {
        "generated_with": "S2ORC 1.0.0",
        "date_generated": "2023-01-19T03:08:48.508469Z"
    },
    "title": "Measuring Correlation Between Linguists' Judgments and Latent Dirichlet Allocation Topics",
    "authors": [
        {
            "first": "Ari",
            "middle": [],
            "last": "Chanen",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sydney Sydney",
                "location": {
                    "postCode": "2007",
                    "country": "Australia"
                }
            },
            "email": ""
        },
        {
            "first": "Jon",
            "middle": [],
            "last": "Patrick",
            "suffix": "",
            "affiliation": {
                "laboratory": "",
                "institution": "University of Sydney Sydney",
                "location": {
                    "postCode": "2007",
                    "country": "Australia"
                }
            },
            "email": ""
        }
    ],
    "year": "",
    "venue": null,
    "identifiers": {},
    "abstract": "Data that has been annotated by linguists is often considered a gold standard on many tasks in the NLP field. However, linguists are expensive so researchers seek automatic techniques that correlate well with human performance. Linguists working on the ScamSeek project were given the task of deciding how many and which document classes existed in this previously unseen corpus. This paper investigates whether the document classes identified by the linguists correlate significantly with Latent Dirichlet Allocation (LDA) topics induced from that corpus. Monte-Carlo simulation is used to measure the statistical significance of the correlation between LDA models and the linguists' characterisations. In experiments, more than 90% of the linguists' classes met the level required to declare the correlation between linguistic insights and LDA models is significant. These results help verify the usefulness of the LDA model in NLP and are a first step in showing that the LDA model can replace the efforts of linguists in certain tasks like subdividing a corpus into classes.",
    "pdf_parse": {
        "paper_id": "U07-1005",
        "_pdf_hash": "",
        "abstract": [
            {
                "text": "Data that has been annotated by linguists is often considered a gold standard on many tasks in the NLP field. However, linguists are expensive so researchers seek automatic techniques that correlate well with human performance. Linguists working on the ScamSeek project were given the task of deciding how many and which document classes existed in this previously unseen corpus. This paper investigates whether the document classes identified by the linguists correlate significantly with Latent Dirichlet Allocation (LDA) topics induced from that corpus. Monte-Carlo simulation is used to measure the statistical significance of the correlation between LDA models and the linguists' characterisations. In experiments, more than 90% of the linguists' classes met the level required to declare the correlation between linguistic insights and LDA models is significant. These results help verify the usefulness of the LDA model in NLP and are a first step in showing that the LDA model can replace the efforts of linguists in certain tasks like subdividing a corpus into classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Abstract",
                "sec_num": null
            }
        ],
        "body_text": [
            {
                "text": "Since linguists are expensive to employ, there is a preference in most NLP projects to use automatic processes especially where it can be shown that the automatic process approaches the performance of the linguists. Several linguists were used on the ScamSeek project (Patrick, 2006) . ScamSeek was created for the Australian Securities and Investments Commission (ASIC) government agency to identify financial scam websites based on the linguistic properties of the webpage content. A major task they performed by the project linguists was to partition the corpus into classes. Besides defining the classes in terms of the documents assigned to them, the linguists also identified phrases they believed were indicative of each class. The LDA corpus model (Blei, 2004) can automatically generate a likely set of corpus topics and subdivide the corpus words among those topics. We will show that there are similarities between the task the LDA performs and the tasks the ScamSeek linguists performed. This paper attempts to determine to what degree LDA topics correlate with the judgments of linguists in partitioning a corpus into document classes.",
                "cite_spans": [
                    {
                        "start": 268,
                        "end": 283,
                        "text": "(Patrick, 2006)",
                        "ref_id": "BIBREF6"
                    },
                    {
                        "start": 756,
                        "end": 768,
                        "text": "(Blei, 2004)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "Formally, we set a null hypothesis, H 0 , to claim that the relationship between the linguists' document classes and LDA topics is random. The alternative hypothesis, H a , claims those document classes and the topics have a significant amount of correspondence or correlation between them. In order to measure how significant the correlation is, principled methods of measuring the statistical significance of the correlation must be found. If the pvalue for the correlation between a document class and the best correlating topic for that class is less than \u03b1 = 0.05, then H 0 will be rejected in favor of H a . The determination of the p-values are discussed in the Methods section.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Introduction",
                "sec_num": "1"
            },
            {
                "text": "The LDA is a Bayesian, generative corpus model which posits a corpus wide set of k topics from which the words of each document are generated. In this model, a topic is a multinomial distribution over terms. According to the LDA model, an author first determines, through a random process, the topic proportions of a new document. Thereafter, the author chooses a topic for the next word and then draws that word randomly according to the chosen topic distribution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "The LDA model can be represented as a graphical model as shown in figure 1. Graphical models represent the dependencies between probabilistic model hyper-parameters and variables. A good introduction can be found in (Buntine, 1995) . The LDA model includes two hyper-parameters, \u03b1 and \u03b2 as well as three random variables (RV's), \u03b8 1:D , z and w, where D is the number of corpus of documents. Figure 1 : The LDA graphical model \u03b1 takes a scalar value that affects the amount of smoothing of the symmetric Dirichlet (dir) distribution that produces the multinomial (multi) distributed \u03b8 m , representing the topic proportions for document m. The hyper-parameter \u03b2 is a k \u00d7 V matrix of probabilities where V is the size of the corpus vocabulary. Each row of \u03b2 is a topic multinomial where \u03b2 ij = p(w = j|z = i). The RV z is an index variable that indicates which topic was chosen for each document word (Steyvers and Griffiths, 2005) (Blei, 2004) . Formally, each document m is assumed to be formed by the following generative steps:",
                "cite_spans": [
                    {
                        "start": 216,
                        "end": 231,
                        "text": "(Buntine, 1995)",
                        "ref_id": "BIBREF3"
                    },
                    {
                        "start": 900,
                        "end": 930,
                        "text": "(Steyvers and Griffiths, 2005)",
                        "ref_id": "BIBREF7"
                    },
                    {
                        "start": 931,
                        "end": 943,
                        "text": "(Blei, 2004)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [
                    {
                        "start": 392,
                        "end": 400,
                        "text": "Figure 1",
                        "ref_id": null
                    }
                ],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "1. Choose proportions \u03b8 m |\u03b1 \u223c Dir(\u03b1). 2. For n \u2208 {1, \u2022 \u2022 \u2022 , N m }: (a) Choose topic z m,n \u223c M ulti(\u03b8 m ) (b) Choose word w m,n from p(w m,n |z m,n , \u03b2 zm,n )",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "where N m is the number of words in document m. Under graphical model notation, shaded elements are observed and unshaded elements are latent. Thus, the circle denoting the w element, representing the words of a document, is the only observed element. The other elements are latent. In order for the LDA model to be useful in practical settings, these latent RV's and hyper-parameters need to be estimated.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
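            {
                "text": "The generative steps above can be made concrete with a short simulation. Below is a minimal numpy sketch (illustrative only; the vocabulary size, topic count and hyper-parameter values are arbitrary assumptions, not this paper's settings):\nimport numpy as np\n\nrng = np.random.default_rng(0)\nV, k, alpha = 1000, 4, 0.1                 # assumed vocabulary size, topic count, smoothing\nbeta = rng.dirichlet(np.ones(V), size=k)   # k x V matrix of topic multinomials\n\ndef generate_document(n_words):\n    theta = rng.dirichlet(alpha * np.ones(k))            # step 1: topic proportions\n    z = rng.choice(k, size=n_words, p=theta)             # step 2(a): a topic per word\n    w = np.array([rng.choice(V, p=beta[t]) for t in z])  # step 2(b): draw each word\n    return theta, z, w",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },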
            {
                "text": "If \u03b1 and \u03b2 are assumed fixed, then the posterior probability w.r.t. \u03b8 and z can be expressed as follows:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "p(\u03b8, z|w, \u03b1, \u03b2) = p(w|\u03b8, z, \u03b1, \u03b2)p(\u03b8, z|\u03b1, \u03b2) p(w|\u03b1, \u03b2) = p(\u03b8, z, w|\u03b1, \u03b2) \u03b8 z p(\u03b8, z, w|\u03b1, \u03b2) d\u03b8",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "Unfortunately, this posterior probability is intractable to calculate due to the integral over the Dirichlet variable. There are several methods for approximating \u03b8 and z. The LDA topic data used in this research was induced using the mean field variational method which is an iterative algorithm that converges on estimates of \u03b8 and z for each document and each word in those documents. Once these estimates have been obtained, then estimates for \u03b1 and \u03b2 can be obtained by holding the values of \u03b8 and z fixed and using an empirical bayes estimation technique. By alternating between the mean field variational estimation and the empirical bayes estimate the values of the latent elements are guaranteed to eventually converge to stable values. For further details on this latent element estimation technique see (Blei, 2004) .",
                "cite_spans": [
                    {
                        "start": 814,
                        "end": 826,
                        "text": "(Blei, 2004)",
                        "ref_id": "BIBREF1"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
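            {
                "text": "The topic data in this paper was induced with Blei's own mean-field variational implementation. As a hedged, present-day stand-in (not the original tooling), a comparable model can be induced with gensim's variational LDA; docs below is an assumed list of preprocessed token lists:\nfrom gensim import corpora, models\n\n# docs: assumed list of token lists, preprocessed the same way as the corpus\ndictionary = corpora.Dictionary(docs)\nbow = [dictionary.doc2bow(d) for d in docs]\nlda = models.LdaModel(bow, num_topics=64, id2word=dictionary, alpha='auto')\ngamma, _ = lda.inference(bow)  # per-document variational gamma vectors",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },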
            {
                "text": "In the experiment section the topic proportions \u03b8 1:D of each document and the topic rows of \u03b2 will be compared to similar data produced by linguists. Table 1 shows the 25 top terms from four sample topics induced from the ScamSeek corpus for a 64 topic model. The top terms are constructed by sorting a topic's multinomial terms by term probability in descending order. The first row of the table shows the name of the linguists' document class that is most correlated 1 with the topic terms shown in the rows below. The last row shows the cumulative probability mass that the top 25 topic words account for. Three of these example topics are most associated with scam classes. Only the topic most associated with the Licensed Operator class is a nonscam class. A good indicator of this is that the word, \"risk\", is one of the most probable terms. ",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 151,
                        "end": 158,
                        "text": "Table 1",
                        "ref_id": "TABREF1"
                    }
                ],
                "eq_spans": [],
                "section": "Background 2.1 LDA Model",
                "sec_num": "2"
            },
            {
                "text": "In this research, we want to measure the strength of the correlations between classes and topics. One challenge of this task is that the classes and the topics are in different forms and the topics are nonparametric distributions. We achieve this aim by utilizing one form of the Monte Carlo Simulation method where a number of random pseudo-LDA models are produced. The correlations between the linguists classes and both the real LDA model as well as the pseudo-models are measured. The correlation scores between all the pseudo-models and the linguists classes are sorted and the real model's correlation score is ranked against the pseudo-models. The percentage of pseudo-model scores that the real model score beats is taken to be the significance level of the real correlation. Let the correlation between the classes and the LDA topics be called the real correlation. From the ranking of the real correlation within all the random correlations an approximate p-value is derived. Let r be the number of random correlations that are the same or better than the real correlation and let n be the number of random models. Then 2 : North and Sham, 2002) report that using Monte-Carlo procedures to calculate empirical pvalues has become commonplace in statistical analysis and give three major motivating factors:",
                "cite_spans": [
                    {
                        "start": 1134,
                        "end": 1155,
                        "text": "North and Sham, 2002)",
                        "ref_id": "BIBREF0"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Monte-Carlo Simulation",
                "sec_num": "2.2"
            },
            {
                "text": "1. Many test statistics do not have a standard asymptotic distribution. 2. Even if such a distribution does exist, it may not be reliable in realistic sample sizes. 3. Calculation of the exact sampling distribution through exhaustive enumeration of all possible samples may be too computationally intensive.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Monte-Carlo Simulation",
                "sec_num": "2.2"
            },
            {
                "text": "Reason #1 definitely applies to the case of trying to find a distribution for possible LDA models. The LDA estimation algorithm is nonparametric itself so there is no reason to think it would produce topic multinomials that fit a parametric distribution. Reason #2 does not apply. Reason #3 is a major factor for using Monte-Carlo techniques in the case of this research. Each randomised topic has N = 18, 000 terms. To randomise a LDA model each topic has its terms and probabilities shuffled in a pseudorandom matter. There are N ! different shuffles for each topic which is for all practical purposes infinite in this case.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Monte-Carlo Simulation",
                "sec_num": "2.2"
            },
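            {
                "text": "A minimal sketch of the empirical p-value computation described above (assuming higher correlation scores mean stronger correlation; the function name is ours, not the paper's):\ndef empirical_p_value(real_score, random_scores):\n    # r = number of random correlations as good as or better than the real one\n    r = sum(1 for s in random_scores if s >= real_score)\n    n = len(random_scores)\n    return r / n  # r/n rather than (r + 1)/(n + 1); see footnote 2",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Monte-Carlo Simulation",
                "sec_num": "2.2"
            },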
            {
                "text": "The LDA generative corpus model assumes that every corpus document draws its terms from \u03ba topics, where \u03ba is a parameter of the LDA model. One of the products of the LDA model estimation process is a \u03b3-vector for each document which gives the estimated distribution of a document's terms over the topics. Normalizing this vector by dividing by the total number of document terms gives the document topic proportions which is the same information that the LDA model's \u03b8 m RV represents for a given document m. Unlike topics, the document classes the linguists constructed are meant to be mutually exclusive; a document may belong to one and only one of those classes. Although this is a significant difference between topics and these document classes, in practice the two are not too dissimilar. An analysis of all the normalised \u03b3-vectors shows that, on average, each document devotes around 60% of its terms to a major topic, and allocates between 4-20% of its remaining content to each of four or five minor topics, leaving only small amounts of the topic mass to the rest of the topics. This pattern seems to hold irrespective of the number of topics used to generate the LDA model, as table 2 shows. Since most documents have a single topic with more than a majority of the topic mass, we will assume that topics can approximate the behavior of document classes. In addition to creating document classes, the linguists also created motif classes to embody certain qualities of documents that transcend the document classes. In this way, the motifs are closer to topics than document classes. The linguists identified char-acteristic phrases for the motif classes just as they did for the document classes. An example of a motif class is one called the persuasion class which has indicative phrases that are common to many scams in which a scammer tries to persuade victims to do something. Many of the scam documents exhibit some of these persuasion phrases. Unfortunately, exact phrases cannot be revealed because parts of the ScamSeek project are proprietary.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarities and differences between document classes and LDA topics",
                "sec_num": "3"
            },
            {
                "text": "For the remainder of the paper, the term classes will be used to signify both document classes and motif classes.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Similarities and differences between document classes and LDA topics",
                "sec_num": "3"
            },
            {
                "text": "Two types of methods were employed to estimate a p-value for the correlation between the linguists classes and the LDA topics: categorical and termbased. The categorical method attempts to measure the randomness in the relationship between the topics and the linguists' document classes. The termbased methods measure correlations between word distributions in the LDA topics and the linguists' class characteristic phrases.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Methods",
                "sec_num": "4"
            },
            {
                "text": "LDA models were generated on 1917 documents from the ScamSeek corpus. Eight models were induced with the following numbers of topics: 2, 4, 8, 16, 64, 128, 256. These models are referred to as the \"real\" models to differentiate them from the random LDA models introduced below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Methods",
                "sec_num": "4"
            },
            {
                "text": "The \u03c7 2 test (Devore, 1999) can be used to test if two categorical variables are statistically independent. A contingency table is used to show the counts of some entity for every possible pairing of categories, one from each of the two variables. The empirical counts are compared to the counts that would be expected if the two variables were independent. The \u03c7 2 experiments described in this section only utilise the document classes and not the motif classes.",
                "cite_spans": [
                    {
                        "start": 13,
                        "end": 27,
                        "text": "(Devore, 1999)",
                        "ref_id": "BIBREF4"
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "The raw LDA \u03b3-vectors give a document's term count for each topic therefore topics are categorical in this context. To make a document class into a categorical variable, the \u03b3-vectors for all the documents in the same document class can be summed so that each cell contains the total term count for one topic over all the documents in that class. Then, each cell (i, j) of the \u03c7 2 contingency table will hold the total number of words from document class i that were assigned to topic j.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "There is one problem with using the \u03c7 2 test in this setting. Completely correct usage of the \u03c7 2 test requires that each joint event from the contingency table is independent of all the others. However, according to (Blei, 2004, pg. 20) , under LDA, the terms of the document are exchangeable, meaning that their order does not matter. This implies the terms are not independent of each other but rather conditionally independent with respect to the latent topics. Because of this potential problem, any results must be viewed with some caution.",
                "cite_spans": [
                    {
                        "start": 217,
                        "end": 237,
                        "text": "(Blei, 2004, pg. 20)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "The \u03c7 2 statistic was calculated using each of the eight LDA models to determine the relationship between the document classes and the topics. These tests all indicated that the relationship was highly significant with a p-value of zero.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "To verify this result, control experiments were performed where 10 random test sets were generated by shuffling the documents assigned to each class. The \u03c7 2 test was run on each of the randomised sets. For the random sets, the \u03c7 2 statistic was much lower than the value obtained from the real class assignments. Unexpectedly, the calculated p-value was still zero, indicating that even the randomised tests were highly significant.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "We concluded that this method of applying the \u03c7 2 test was not appropriate for the task of rejecting H 0 , and that the most likely reason is that the document words are not completely independent.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using the \u03c7 2 test",
                "sec_num": "4.1"
            },
            {
                "text": "Next, we turn to a term-based method of trying to verify the H a hypothesis, using word distribution correlations between topics and classes rather than a categorical analysis. To test this hypothesis Monte-Carlo simulation was used as described in the Background section 2.2. Futher details are provided in there section.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using Monte-Carlo Simulation",
                "sec_num": "4.2"
            },
            {
                "text": "Again in this method, an approximate p-value is calculated from the ranking of real correlations within a sorted list of pseudo-correlations. The real correlations are between the words of the linguists' class characteristic phrases and real LDA topics while the pseudo-correlations are between those phrase words and a set of randomly generated pseudo-topics.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Using Monte-Carlo Simulation",
                "sec_num": "4.2"
            },
            {
                "text": "To begin with, for each of the eight real models (models with 2, 4, 8, 16, 32, 64, 128, 256 topics) , one hundred randomised models were generated. Real LDA models have topics that concentrate most of their probability mass on a relatively small number of terms compared to the total number of terms in the distribution. The method of randomization was chosen so as to maintain the same level of probabilistic \"clumpiness\" in the random topics. To form a pseudo-random LDA model from a real model, for each real topic, the terms and their probabilities are separated. To form a pseudo-topic, the terms are shuffled and assigned to one of the pre-existing multinomial probabilities from the real model's corresponding topic.",
                "cite_spans": [
                    {
                        "start": 49,
                        "end": 99,
                        "text": "(models with 2, 4, 8, 16, 32, 64, 128, 256 topics)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Forming the random LDA models",
                "sec_num": "4.2.1"
            },
            {
                "text": "Again, we are trying to rank the best correlation of a real topic with a class among the correlations of that class with the best correlations among all the pseudo-topics in each randomised LDA model. This section defines some notation needed in discussing these class/topic correlations. This notation assumes a specific model (defined by the number of topics) and a specific correlation measure have been chosen. Different kinds of correlation measures will be explained below.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "Below, classes are referred to with the index i. Topics are referred to with the index k. An index of r refers to the one real model while an integer index j refers to one of the 100 random models.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "In our notation, C irk , refers to the correlation of the ith class and real model's kth topic and C ijk refers to the correlation of the ith class and jth random model's kth topic.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "In order to obtain the p-value for each class, correlation measures are calculated for each pairing of class and topic, both real and random. First C irk is calculated for the one real model. Next, C ijk is calculated for each of the hundred random LDA models. The real topic that shows the best correlation score with class i is, C ir . Next, the procedure is performed on each of the 100 random LDA models so a correlation C ijk between the class and each pseudo-topic k in each random model j is calculated. The best correlation for each random model C ij is found. The best correlations for each random model are sorted from least correlated to most correlated. Then the rank of the best real topic correlation is found within the sorted list of random best correlations. Since our criteria for significance is \u03b1 = 0.05 then for a given number of topics, type of correlation measure and class i, if:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "C ir > C ij",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "for 95 of the 100 random models then we would take this as sufficient evidence that the null hypothesis can be rejected in favor of the alternative hypothesis.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
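            {
                "text": "The per-class significance test just described can be sketched as follows, assuming corr is one of the correlation measures defined below and that higher scores mean stronger correlation (names are ours, not the paper's):\ndef class_is_significant(corr, class_dist, real_topics, random_models, alpha=0.05):\n    c_ir = max(corr(class_dist, t) for t in real_topics)  # best real correlation\n    c_ij = [max(corr(class_dist, t) for t in model)        # best per random model\n            for model in random_models]\n    beaten = sum(1 for c in c_ij if c_ir > c)\n    return beaten / len(c_ij) >= 1 - alpha  # e.g. beats at least 95 of 100",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },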
            {
                "text": "The following subsections first define a method for forming multinomial distributions from class indicative phrase and next specifies three correlation measures defined on two multinomial distributions over the same range of terms.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Correlating one class with one LDA topic",
                "sec_num": "4.2.2"
            },
            {
                "text": "The LDA topics are multinomial distributions over 18,000 terms. One way to correlate a class with these topics is to form a multinomial distribution from the class. The phrases that the linguists generated as being characteristic of the class can be used to achieve this goal. All the phrases are treated as though they came from a single document and processed in the same way the corpus documents were processed before the LDA models were built from them. This means joining terms together into multiword expressions (MWE) where appropriate and eliminating stopwords. Next, a histogram is formed with the terms and MWE's as elements. Finally, the count for each element is normalised by the total number of elements, thus yielding a probability distribution.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A distribution from class phrases",
                "sec_num": "4.2.3"
            },
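            {
                "text": "A sketch of this construction; preprocess stands in for the corpus pipeline (MWE joining and stopword removal), which is proprietary and therefore assumed here:\nfrom collections import Counter\n\ndef class_distribution(phrases, vocabulary, preprocess):\n    tokens = [t for p in phrases for t in preprocess(p)]  # phrases as one document\n    counts = Counter(tokens)\n    total = sum(counts.values())\n    # normalised histogram: a multinomial over the same terms as the topics\n    return {term: counts[term] / total for term in vocabulary}",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "A distribution from class phrases",
                "sec_num": "4.2.3"
            },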
            {
                "text": "Now that a distribution, C i , has been formed for each class i, we can correlate them with each topic distribution, T k . One way to do this is by treating the two distributions as vectors in term space. The cosine of the angle between these two vectors can be seen as a measure of how similar the two distributions are. If the angle is zero then the two distributions are the same whereas if they are perpendicular they are maximally dissimilar. The cosine of the angle, \u03b8, between C i and T k can be gotten from the formula:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The vector cosine correlation measure",
                "sec_num": "4.2.4"
            },
            {
                "text": "cos \u03b8 = C \u2022 T ||C|| ||T||",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The vector cosine correlation measure",
                "sec_num": "4.2.4"
            },
            {
                "text": "This measure will vary in the range [0, 1] where 1 indicates the two distributions are identical.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The vector cosine correlation measure",
                "sec_num": "4.2.4"
            },
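            {
                "text": "With both distributions represented as vectors over the same term space, the measure is a one-liner (a sketch; c and t are assumed numpy probability vectors):\nimport numpy as np\n\ndef vector_cosine(c, t):\n    # cosine of the angle between the class and topic vectors, in [0, 1]\n    return float(np.dot(c, t) / (np.linalg.norm(c) * np.linalg.norm(t)))",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The vector cosine correlation measure",
                "sec_num": "4.2.4"
            },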
            {
                "text": "correlation measure The hypergeometric distribution (HD) (Devore, 1999, pg. 122) is often associated in with the probability of drawing lottery numbers that match the winning numbers. In the way the HD is used here, the winning lotto numbers are analogous to the words of the class indicated phrases and the most probable terms in a topic are analogous to the numbers on the lotto ticket. The HD assumes the following:",
                "cite_spans": [
                    {
                        "start": 57,
                        "end": 80,
                        "text": "(Devore, 1999, pg. 122)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The hypergeometric distribution",
                "sec_num": "4.2.5"
            },
            {
                "text": "1. There is a population of size N to be sampled from. 2. Each member of the population can either be a success or a failure. There are M successes in the population. 3. A sample of size n is drawn in an independent and identically distributed manner. N = 18, 000 is the total number of terms in both the class and topic multinomial distributions. For a given class, C, a term is defined as a success if it matches one of the terms from the class characteristic phrases, for a total of M possible successes. C M denotes the set of those success terms. Now, given the kth topic T k , T k,M is the set of the M most probable terms in that topic. Let I k be the number of elements in the intersection set C M \u2229 T k,M . The probability of I k , for the subset of hypergeometric distributions where n = M is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The hypergeometric distribution",
                "sec_num": "4.2.5"
            },
            {
                "text": "P (I k |N, M ) = M I k N \u2212 M M \u2212 I k N M",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The hypergeometric distribution",
                "sec_num": "4.2.5"
            },
            {
                "text": "The lower the above probability is the greater the chance of correlation between C i and T k . Since this probability can be extremely small, log probabilities are used to express it. Therefore, the range of this correlation measure is (\u2212\u221e, 0).",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The hypergeometric distribution",
                "sec_num": "4.2.5"
            },
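            {
                "text": "A sketch using scipy's hypergeometric distribution; topic is an assumed list of (term, probability) pairs and class_terms the set of success terms C_M. Note that scipy's parameter order is (k, population size, successes in population, sample size):\nfrom scipy.stats import hypergeom\n\ndef hd_score(class_terms, topic, N=18000):\n    M = len(class_terms)\n    top_M = {t for t, p in sorted(topic, key=lambda x: x[1], reverse=True)[:M]}\n    I_k = len(top_M & class_terms)\n    return hypergeom.logpmf(I_k, N, M, M)  # log P(I_k | N, M) with n = M draws",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The hypergeometric distribution",
                "sec_num": "4.2.5"
            },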
            {
                "text": "Another simpler measure of distribution correlation is the amount of probability mass the two distributions share. The formula for calculating this measure for the distributions of the ith class C i and the kth topic T k is:",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The distribution intersection correlation measure",
                "sec_num": "4.2.6"
            },
            {
                "text": "DI(C i , T k ) = N j=1 min(C i [j], T k [j])",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The distribution intersection correlation measure",
                "sec_num": "4.2.6"
            },
            {
                "text": "where DI stands for \"distribution intersection\", N is the total number of terms in each distribution. This measure also has the range of [0, 1] with 1 meaning the two distributions are the same.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The distribution intersection correlation measure",
                "sec_num": "4.2.6"
            },
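            {
                "text": "The DI measure reduces to an elementwise minimum (a sketch; c and t are assumed numpy probability vectors over the same terms):\nimport numpy as np\n\ndef distribution_intersection(c, t):\n    # shared probability mass of the two distributions, in [0, 1]\n    return float(np.minimum(c, t).sum())",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "The distribution intersection correlation measure",
                "sec_num": "4.2.6"
            },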
            {
                "text": "To reiterate the problem definition, we seek to determine if there is enough evidence to reject the null hypothesis in favor of the alternative hypothesis. Since we set \u03b1 = 0.05, this means that the real model must have a better correlation should score then 95% of the random models, for a given model and type of correlation measure. The final performance results are measured in terms of the percentage of classes where the H 0 could be rejected. In many cases, the real model did better than all 100 of the pseudo-models so results are also provided for the case where we had set our H 0 rejection threshold to \u03b1 = 0.01.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results for using 100 random models",
                "sec_num": "5.1"
            },
            {
                "text": "Three different correlation measures were used: vector cosine (VC), distribution intersection (DI), and hypergeometric distribution (HD). Table 3 shows the results for the models of various numbers of topics and for the three correlation measures. The table gives the percentage of classes that have p-values less than 0.05 and 0.01.",
                "cite_spans": [],
                "ref_spans": [
                    {
                        "start": 138,
                        "end": 145,
                        "text": "Table 3",
                        "ref_id": "TABREF5"
                    }
                ],
                "eq_spans": [],
                "section": "Results for using 100 random models",
                "sec_num": "5.1"
            },
            {
                "text": "For the DI correlation measure, there was enough evidence to reject H 0 at \u03b1 = 0.05 for comfortably over 90% of the classes for all eight LDA models classes and this was nearly true at \u03b1 = 0.01.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results for using 100 random models",
                "sec_num": "5.1"
            },
            {
                "text": "The results for the VC correlation measure are less significant where only five out of eight of the models could claim to reject H 0 for more than 90% of the classes for \u03b1 = 0.05. Also, the correlation level fell off for the models with higher num- bers of topics (64, 128, 256) for \u03b1 = 0.05 and there was much larger gap between the correlations at \u03b1 = 0.05 and \u03b1 = 0.01 compared to the much smaller gap for the DI results. One problem with the VC measure is that the angle between the problem with C i and T k vectors is only measuring differences in the terms that have nonzero probabilities. Therefore, this measure is less restrictive, allowing for a greater chance that a random topic may have the right combination of terms so that its correlation with a class will be better than the corresponding real model's best correlation. The HP measure was the worst that \u03b1 = 0.05 but in the middle for \u03b1 = 0.01. one interesting trend is that it does much better then the VC measure for high topic models (128, 256.) The DI correlation measure shows the generally higher correlation scores which does not necessarily mean it is the best measure for our purpose. Yet, it is a straightforward measure of the correlation between two distributions and it is the most straightforward to calculate.",
                "cite_spans": [
                    {
                        "start": 264,
                        "end": 268,
                        "text": "(64,",
                        "ref_id": null
                    },
                    {
                        "start": 269,
                        "end": 273,
                        "text": "128,",
                        "ref_id": null
                    },
                    {
                        "start": 274,
                        "end": 278,
                        "text": "256)",
                        "ref_id": null
                    },
                    {
                        "start": 1004,
                        "end": 1015,
                        "text": "(128, 256.)",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results for using 100 random models",
                "sec_num": "5.1"
            },
            {
                "text": "The evidence that LDA topics may mirror certain parts of linguistic instincts looks fairly convincing from the tests using 100 random LDA models. To add weight to these results more Monte-Carlo simulations were run using 1000 completely different random LDA models. The results are shown in table 4.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results for using 1000 random models",
                "sec_num": "5.2"
            },
            {
                "text": "Notice that the column reporting the results for the DI correlation measurement and with \u03b1 = 0.05, has the exact same values as those for hundred If average the percentages for the 2,4, 8,16 ,32 and 64 topic models for the hundred and thousand models test for each column from tables 3 and 4 then three of the columns are exactly the same and two have a change of 1% are less. That the change from the hundred model simulation to the thousand model simulation was minimal is a good sign that this technique of measuring the correlation is stable and adds weight to its validity.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Results for using 1000 random models",
                "sec_num": "5.2"
            },
            {
                "text": "Real LDA models and the judgments of the linguists in classifying the corpus do appear to be significantly well correlated when compared to random LDA models. The distribution intersection correlation is used successfully here as a simple yet effective way of measuring the correspondence between the phrases that the linguists came up with to characterise classes and the words of the topics. The hypergeometric distribution and vector cosine correlation measures also showed significant correlation strengths but to a lesser degree than the DI measure.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "The results reported on here should add to the confidence of the NLP field that the LDA corpus model, even though it is only an approximate statistical model, can correspond to human judgments as to what the salient features of a document corpus are. 3 To have the exact same values may seen strange at first but these are percentagesof classes that beat more than 5% of the random models. Some of the classes that did well in the hundred model test did not meet the significance cut off in the thousand model test and vice versa but the end result was the same.",
                "cite_spans": [
                    {
                        "start": 251,
                        "end": 252,
                        "text": "3",
                        "ref_id": null
                    }
                ],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Conclusion",
                "sec_num": "6"
            },
            {
                "text": "The correlation measure used to determine the most correlated class is the distributional intersection (DI) measure which is described later in the methods section.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            },
            {
                "text": "There is some dispute as to whether r/n or (r +1)/(n + 1) is the better p-value estimator.(Ewens, 2003) and(Broman and Caffo, 2003) prove that (r + 1)/(n + 1) is biased so we use r/n here.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "",
                "sec_num": null
            }
        ],
        "back_matter": [
            {
                "text": "The authors would like to thank Dr. Alex Smola and Dr. Sanjay Chawla for their input into this research.",
                "cite_spans": [],
                "ref_spans": [],
                "eq_spans": [],
                "section": "Acknowledgements",
                "sec_num": null
            }
        ],
        "bib_entries": {
            "BIBREF0": {
                "ref_id": "b0",
                "title": "Letter to the editor on \"simulation-based p values: Response to north",
                "authors": [
                    {
                        "first": "D",
                        "middle": [
                            "B V"
                        ],
                        "last": "Curtis",
                        "suffix": ""
                    },
                    {
                        "first": "P",
                        "middle": [
                            "C"
                        ],
                        "last": "North",
                        "suffix": ""
                    },
                    {
                        "first": ";",
                        "middle": [],
                        "last": "Sham",
                        "suffix": ""
                    }
                ],
                "year": 2002,
                "venue": "Am J Hum Genet",
                "volume": "71",
                "issue": "",
                "pages": "439--440",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "D. Curtis B. V. North and P. C. Sham. 2002. Letter to the editor on \"simulation-based p values: Response to north et al..\". Am J Hum Genet, 71:439-440.",
                "links": null
            },
            "BIBREF1": {
                "ref_id": "b1",
                "title": "Probabilistic models of text and images",
                "authors": [
                    {
                        "first": "David",
                        "middle": [],
                        "last": "Blei",
                        "suffix": ""
                    }
                ],
                "year": 2004,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "David Blei. 2004. Probabilistic models of text and im- ages. Ph.D. thesis, U.C. Berkeley.",
                "links": null
            },
            "BIBREF2": {
                "ref_id": "b2",
                "title": "Letter to the editor on \"simulation-based p values: Response to north",
                "authors": [
                    {
                        "first": "Karl",
                        "middle": [
                            "W"
                        ],
                        "last": "Broman",
                        "suffix": ""
                    },
                    {
                        "first": "Brian",
                        "middle": [
                            "S"
                        ],
                        "last": "Caffo",
                        "suffix": ""
                    },
                    {
                        "first": ";",
                        "middle": [],
                        "last": "",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Am J Hum Genet",
                "volume": "72",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Karl W. Broman and Brian S. Caffo. 2003. Letter to the editor on \"simulation-based p values: Response to north et al..\". Am J Hum Genet, 72:496.",
                "links": null
            },
            "BIBREF3": {
                "ref_id": "b3",
                "title": "Graphical models for discovering knowledge",
                "authors": [
                    {
                        "first": "Wray",
                        "middle": [
                            "L"
                        ],
                        "last": "Buntine",
                        "suffix": ""
                    }
                ],
                "year": 1995,
                "venue": "Advances in Knowledge Discovery and Data Mining",
                "volume": "",
                "issue": "",
                "pages": "59--83",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Wray L. Buntine. 1995. Graphical models for discov- ering knowledge. In U. M. Fayyad, G. Piatetsky- Shapiro, P. Smyth, and R. S. Uthurasamy, editors, Advances in Knowledge Discovery and Data Mining, pages 59-83. MIT Press.",
                "links": null
            },
            "BIBREF4": {
                "ref_id": "b4",
                "title": "Probability and Statistics for Engineering and the Sciences",
                "authors": [
                    {
                        "first": "Jay",
                        "middle": [
                            "L"
                        ],
                        "last": "Devore",
                        "suffix": ""
                    }
                ],
                "year": 1999,
                "venue": "",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jay L. Devore. 1999. Probability and Statistics for Engi- neering and the Sciences. Duxbury.",
                "links": null
            },
            "BIBREF5": {
                "ref_id": "b5",
                "title": "Letter to the editor on \"on estimating p-values by monte carlo methods",
                "authors": [
                    {
                        "first": "Warren",
                        "middle": [
                            "J"
                        ],
                        "last": "Ewens",
                        "suffix": ""
                    }
                ],
                "year": 2003,
                "venue": "Am J Hum Genet",
                "volume": "72",
                "issue": "",
                "pages": "496--497",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Warren J. Ewens. 2003. Letter to the editor on \"on esti- mating p-values by monte carlo methods\". Am J Hum Genet, 72:496-497.",
                "links": null
            },
            "BIBREF6": {
                "ref_id": "b6",
                "title": "The scamseek project -text mining for financial scams on the internet",
                "authors": [
                    {
                        "first": "Jon",
                        "middle": [],
                        "last": "Patrick",
                        "suffix": ""
                    }
                ],
                "year": 2006,
                "venue": "Selected Papers from AusDM",
                "volume": "",
                "issue": "",
                "pages": "295--302",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "Jon Patrick. 2006. The scamseek project -text mining for financial scams on the internet. In Selected Papers from AusDM, pages 295-302.",
                "links": null
            },
            "BIBREF7": {
                "ref_id": "b7",
                "title": "Probabilistic topic models",
                "authors": [
                    {
                        "first": "M",
                        "middle": [],
                        "last": "Steyvers",
                        "suffix": ""
                    },
                    {
                        "first": "T",
                        "middle": [],
                        "last": "Griffiths",
                        "suffix": ""
                    }
                ],
                "year": 2005,
                "venue": "Latent Semantic Analysis: A Road to Meaning. Laurence Erlbaum",
                "volume": "",
                "issue": "",
                "pages": "",
                "other_ids": {},
                "num": null,
                "urls": [],
                "raw_text": "M. Steyvers and T. Griffiths. 2005. Probabilistic topic models. In T. Landauer, D. Mcnamara, S. Dennis, and W. Kintsch, editors, Latent Semantic Analysis: A Road to Meaning. Laurence Erlbaum.",
                "links": null
            }
        },
        "ref_entries": {
            "TABREF1": {
                "type_str": "table",
                "num": null,
                "content": "<table><tr><td>: The 25 top terms from four sample topics</td></tr><tr><td>induced from the ScamSeek corpus for a 64 topic</td></tr><tr><td>model.</td></tr></table>",
                "html": null,
                "text": ""
            },
            "TABREF3": {
                "type_str": "table",
                "num": null,
                "content": "<table/>",
                "html": null,
                "text": "The average percentage of the 7 top ranked topics from each document in six different LDA models."
            },
            "TABREF4": {
                "type_str": "table",
                "num": null,
                "content": "<table><tr><td/><td/><td/><td/><td/><td/><td>05</td></tr><tr><td>2</td><td>79.6</td><td>91.8</td><td>89.8</td><td>100</td><td>83.7</td><td>87.8</td></tr><tr><td>4</td><td>77.6</td><td>95.9</td><td>91.8</td><td>100</td><td>85.7</td><td>91.8</td></tr><tr><td>8</td><td>79.6</td><td>95.9</td><td>91.8</td><td>95.9</td><td>85.7</td><td>91.8</td></tr><tr><td>16</td><td>77.6</td><td>93.9</td><td>91.8</td><td>95.9</td><td>85.7</td><td>87.8</td></tr><tr><td>32</td><td>73.5</td><td>91.8</td><td>93.9</td><td>95.9</td><td>85.7</td><td>87.8</td></tr><tr><td>64</td><td>63.3</td><td>85.7</td><td>89.8</td><td>93.9</td><td>91.8</td><td>93.9</td></tr><tr><td>128</td><td>61.2</td><td>79.6</td><td>91.8</td><td>98</td><td>91.8</td><td>95.9</td></tr><tr><td>256</td><td>69.4</td><td>75.5</td><td>87.8</td><td>93.9</td><td>98</td><td>98</td></tr></table>",
                "html": null,
                "text": "Vector Cosine Distrib. Intersection Hypergeometric Topics %<0.01 %<0.05 %<0.01 %<0.05 %<0.01 %<0."
            },
            "TABREF5": {
                "type_str": "table",
                "num": null,
                "content": "<table><tr><td>: The %'s of classes having p &lt; 0.01 and</td></tr><tr><td>p &lt; 0.05 for 3 different correlation measures using</td></tr><tr><td>100 random LDA models for the Monte-Carlo sim-</td></tr><tr><td>ulation .</td></tr></table>",
                "html": null,
                "text": ""
            },
            "TABREF6": {
                "type_str": "table",
                "num": null,
                "content": "<table><tr><td/><td/><td/><td/><td/><td/><td>05</td></tr><tr><td>2</td><td>85.7</td><td>93.9</td><td>93.9</td><td>100</td><td>85.7</td><td>91.8</td></tr><tr><td>4</td><td>81.6</td><td>95.9</td><td>91.8</td><td>100</td><td>85.7</td><td>91.8</td></tr><tr><td>8</td><td>81.6</td><td>93.9</td><td>93.9</td><td>95.9</td><td>87.8</td><td>91.8</td></tr><tr><td>16</td><td>79.6</td><td>93.9</td><td>91.8</td><td>95.9</td><td>87.8</td><td>89.8</td></tr><tr><td>32</td><td>77.6</td><td>91.8</td><td>91.8</td><td>95.9</td><td>87.8</td><td>87.8</td></tr><tr><td>64</td><td>73.5</td><td>85.7</td><td>91.8</td><td>93.9</td><td>93.9</td><td>93.9</td></tr></table>",
                "html": null,
                "text": "Vector Cosine Distrib. Intersection Hypergeometric Topics %<0.01 %<0.05 %<0.01 %<0.05 %<0.01 %<0."
            },
            "TABREF7": {
                "type_str": "table",
                "num": null,
                "content": "<table/>",
                "html": null,
                "text": "The %'s of classes having p < 0.01 and p < 0.05 for 3 different correlation measures using 1000 random LDA models for the Monte-Carlo simulation . model simulation in table 4.3"
            }
        }
    }
}