| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T11:58:08.613572Z" |
| }, |
| "title": "Life still goes on: Analysing Australian WW1 Diaries through Distant Reading", |
| "authors": [ |
| { |
| "first": "Ashley", |
| "middle": [], |
| "last": "Dennis-Henderson", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "ARC Centre of Excellence for Mathematical and Statistical Frontiers (ACEMS)", |
| "institution": "The University of Adelaide", |
| "location": {} |
| }, |
| "email": "ashley.dennis-henderson@adelaide.edu.au" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Roughan", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "ARC Centre of Excellence for Mathematical and Statistical Frontiers (ACEMS)", |
| "institution": "The University of Adelaide", |
| "location": {} |
| }, |
| "email": "matthew.roughan@adelaide.edu.au" |
| }, |
| { |
| "first": "Lewis", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "ARC Centre of Excellence for Mathematical and Statistical Frontiers (ACEMS)", |
| "institution": "The University of Adelaide", |
| "location": {} |
| }, |
| "email": "lewis.mitchell@adelaide.edu.au" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Tuke", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "ARC Centre of Excellence for Mathematical and Statistical Frontiers (ACEMS)", |
| "institution": "The University of Adelaide", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "An increasing amount of historic data is now available in digital (text) formats. This gives quantitative researchers an opportunity to use distant reading techniques, as opposed to traditional close reading, in order to analyse larger quantities of historic data. Distant reading allows researchers to view overall patterns within the data and reduce researcher bias. One such data set that has recently been transcribed is a collection of over 500 Australian World War I (WW1) diaries held by the State Library of New South Wales. Here we apply distant reading techniques to this corpus to understand what soldiers wrote about and how they felt over the course of the war. Extracting dates accurately is important as it allows us to perform our analysis over time, however, it is very challenging due to the variety of date formats and abbreviations diarists use. But with that data, topic modelling and sentiment analysis can then be applied to show trends, for instance, that despite the horrors of war, Australians in WW1 primarily wrote about their everyday routines and experiences. Our results detail some of the challenges likely to be encountered by quantitative researchers intending to analyse historical texts, and provide some approaches to these issues.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "An increasing amount of historic data is now available in digital (text) formats. This gives quantitative researchers an opportunity to use distant reading techniques, as opposed to traditional close reading, in order to analyse larger quantities of historic data. Distant reading allows researchers to view overall patterns within the data and reduce researcher bias. One such data set that has recently been transcribed is a collection of over 500 Australian World War I (WW1) diaries held by the State Library of New South Wales. Here we apply distant reading techniques to this corpus to understand what soldiers wrote about and how they felt over the course of the war. Extracting dates accurately is important as it allows us to perform our analysis over time, however, it is very challenging due to the variety of date formats and abbreviations diarists use. But with that data, topic modelling and sentiment analysis can then be applied to show trends, for instance, that despite the horrors of war, Australians in WW1 primarily wrote about their everyday routines and experiences. Our results detail some of the challenges likely to be encountered by quantitative researchers intending to analyse historical texts, and provide some approaches to these issues.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "World War I (WW1) was a defining event of the 20th century, and impacted millions worldwide. Researchers have studied the war, especially the experiences of those on the front lines. Primarily, this has been done through close reading of primary sources such as diaries and letters. However, recent advances in computational methods to analyse large text corpora offers the opportunity to analyse sources such as these through distant reading. Distant reading involves the application of mathematical and computational techniques from natural language processing (NLP) to perform statistical analysis of text (J\u00e4nicke et al., 2015) . Distant reading has several advantages, including the ability to analyse large quantities of data and see overall patterns as well as the reduction of researcher bias. Further, distant and close reading can be combined such that interesting patterns found through distant reading can be more closely examined using close reading to determine why they occur. This work aims to use distant reading to understand what Australian soldiers went through and how they felt over the course of WW1, by analysing a unique historical data set: a large collection of transcriptions of Australian soldiers' diaries, held by the State Library of New South Wales. To our knowledge this paper represents the first NLP analysis of this data set.", |
| "cite_spans": [ |
| { |
| "start": 609, |
| "end": 631, |
| "text": "(J\u00e4nicke et al., 2015)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "This research takes advantage of the fact that diaries contain temporal information. However, extracting dates is a difficult task due to the varying manner in which dates can be written. This is further complicated by the desire to focus on the dates on which entries were written and not dates mentioned within the entries as these may refer to times and events from outside the war or at least out of the context of the current entry. In order to extract and clean dates we use a combination of regular expressions and optimisation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Once dates were extracted we were able to apply topic modelling and sentiment analysis as a function of time. We are able to detect topics corresponding to particular developments of the war, and the associated sentiment for those periods. Further, we show that the diarists wrote more about everyday experiences, e.g., the time of day and meals, than they did about training and battles. This might be surprising as the war was one of the most traumatic events of the twentieth century and conventional historical narratives concentrate on the pain and suffering of the soldiers. However, in the diaries, we see the war's participants adapting their everyday lives to their circumstances, and in fact their overall sentiment across the war is surprisingly positive.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We focus on Australian WW1 diaries held by the State Library of New South Wales. After the war ended, the European War Collecting Project was created by Principal Librarian William Ifould and the trustees of the Library (State Library of New South Wales, 2019). Their aim was to collect documents, including diaries, letters, war narratives, memoirs and photographs, which gave the experiences and personal feelings of those who served. In total, this collection has 966 documents, 557 of which are nonempty war diaries. A complete breakdown of the collection is given in Table 1 . Since collecting these documents, the library has scanned them and used crowd sourcing to transcribe them, giving researchers access to digital (text) versions of the documents. Table 1 : The number of each type of document in the NSW State Library Collection, along with the number of pages, words and authors. The \"Other\" category includes documents such as telegrams, photos, postcards, scrapbooks, and newspaper clippings. Note, there are a total of 577 diaries in this collection, however, 20 of the transcribed diaries were empty, and so we only analyse 557 diaries.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 572, |
| "end": 579, |
| "text": "Table 1", |
| "ref_id": null |
| }, |
| { |
| "start": 760, |
| "end": 767, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "2" |
| }, |
| { |
| "text": "These documents can be individually downloaded from the State Library's website (State Library of New South Wales, 2020), however, we obtained the corpus directly from the Library. Before this corpus could be used it went through a variety of cleaning steps. First, the raw data was converted to a single text file per document, and a metadata table was created by using regular expressions to extract information from the document titles. Then dates had to be extracted so that we could perform analysis over time. Raw dates were extracted using regular expressions, however, several issues were found in these raw dates requiring us to clean them through optimisation. More information regarding this is given in Section 4.1. Finally, we changed all text to lowercase, removed numbers and punctuation, singularised words, converted abbreviations to the full word, and for topic modelling we removed stop words. Converting to lowercase, singularising words and converting abbreviations were all done to ensure that the various versions of a word are considered as the same word when performing our analysis. For example, \"kill\", \"killing\", and \"Kills\" all have the same base word: \"kill\". The stop words we removed were based on the stop words data set in the tidytext package (Silge and Robinson, 2006) in R. Figure 1 shows the number of words in our diary corpus per month after this process. Unsurprisingly, the majority of entries were written between August 1914 and December 1919 -Britain, and consequently 3 Related Work and Background", |
| "cite_spans": [ |
| { |
| "start": 1278, |
| "end": 1304, |
| "text": "(Silge and Robinson, 2006)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1311, |
| "end": 1317, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Corpus", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our corpus has previously been studied by Caulfield (2013) and Cochrane (2015) . However, their analysis was based on close reading of a small subsection of the diaries. As far as we are aware, distant reading techniques have not previously been applied to this corpus. However, distant reading techniques have been broadly applied to other historic documents. For example, Boschetti et al. (2014) used computational techniques to analyse Italian war bulletins as part of the Memories of War project and Ahmad et al. (2012) developed a tool to map spelling from medieval documents to modern spellings, amongst numerous other examples. Analysis of diaries presents an additional challenge as to use the important temporal data, we must extract a large number of dates.", |
| "cite_spans": [ |
| { |
| "start": 42, |
| "end": 58, |
| "text": "Caulfield (2013)", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 63, |
| "end": 78, |
| "text": "Cochrane (2015)", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 374, |
| "end": 397, |
| "text": "Boschetti et al. (2014)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis of Historic Documents", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Topic modelling is based on the idea that documents are made up of a series of topics, which in turn are a probability distribution over words (Steyvers and Griffiths, 2007) . Currently, the primary method to perform topic modelling is LDA (Latent Dirichlet Allocation) which was initially introduced by Blei et al. (2003) . For a description of the mathematics behind LDA please see Blei et al. (2003) . Sentiment analysis aims to determine the attitude or emotion of the author towards the content of the text. An overview of sentiment analysis can be found in Pang and Lee (2008) or Taboada (2016) . An example of the use of sentiment analysis can be seen in Burghardt et al. (2019) who applied sentiment analysis to the plays of G. E. Lessing. Additional details of our use of these approaches will be provided in the following section.", |
| "cite_spans": [ |
| { |
| "start": 143, |
| "end": 173, |
| "text": "(Steyvers and Griffiths, 2007)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 316, |
| "end": 322, |
| "text": "(2003)", |
| "ref_id": null |
| }, |
| { |
| "start": 384, |
| "end": 402, |
| "text": "Blei et al. (2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 563, |
| "end": 582, |
| "text": "Pang and Lee (2008)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 586, |
| "end": 600, |
| "text": "Taboada (2016)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 662, |
| "end": 685, |
| "text": "Burghardt et al. (2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Analysis Techniques", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Extracting accurate dates from the diaries is important as we wish to perform our analysis over time. However, this is difficult due to the many ways in which dates are written. Raw dates were extracted using regular expressions, attempting to account for the various date formats, possible abbreviations of month and day of the week names, punctuation, and that some dates were written in French. After extracting these raw dates three main issues were discovered. First many dates were missing the month or year values, as from a human perspective it is not necessary to include this information if it was included in a previous date. In these diaries 13.91% of dates were missing the month and 53.76% were missing the year. Second, diarists sometimes wrote the wrong date, either due to not knowing the exact date or accidentally writing down the wrong day/month. The final issue is that we only want to extract the dates when the entries were written. However, regular expressions will also pick up dates of events mentioned within an entry as well as strings that look like dates such as 1st battalion, neither of which we wish to focus on here. We overcame these issues by creating an optimisation program which outputs dates as close as possible to the true date by (i) keeping the dates close to their raw extracted version; (ii) keeping them close to the previous date in sequence; (iii) maintaining the sequence of dates; and (iv) keeping them in the range determined by the known start and end dates of the diary. The optimisation can also exclude dates that appear out of sequence, presenting them as references. We will provide code to perform this task on request.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Date Extraction", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "In this paper we focus on using LDA (Latent Dirichlet Allocation) to perform topic modelling. This model was implemented using the topicmodels package (Gr\u00fcn and Hornik, 2011) in R, using Gibbs Sampling with 10 topics and a randomly chosen seed of 1915.", |
| "cite_spans": [ |
| { |
| "start": 151, |
| "end": 174, |
| "text": "(Gr\u00fcn and Hornik, 2011)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The number of topics used was chosen based on four methods, by Arun et al. 2010, Cao et al. (2008) , Deveaud et al. (2014) , and Griffiths and Steyvers (2004) , which were implemented using the ldatuning package (Nikita, 2020) in R. The results from each method are given in Figure 2 . Based on this, we find that the optimal number of topics for Griffiths2004 is 8 or more, for CaoJuan2009 is 17 or more, for Arun2010 is 6, 7 or 10, and for Deveaud2014 is 8 -12. We chose to use 10 topics since this falls in the range of best parameters for three of the methods. Figure 2 : Results found by applying the four methods for determining the number of topics created by Arun et al. (2010), Cao et al. (2008) , Deveaud et al. (2014) , and Griffiths and Steyvers (2004) . We chose 10 topics as it falls in the optimal range for three approaches.", |
| "cite_spans": [ |
| { |
| "start": 81, |
| "end": 98, |
| "text": "Cao et al. (2008)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 101, |
| "end": 122, |
| "text": "Deveaud et al. (2014)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 143, |
| "end": 158, |
| "text": "Steyvers (2004)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 212, |
| "end": 226, |
| "text": "(Nikita, 2020)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 687, |
| "end": 704, |
| "text": "Cao et al. (2008)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 707, |
| "end": 728, |
| "text": "Deveaud et al. (2014)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 749, |
| "end": 764, |
| "text": "Steyvers (2004)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 275, |
| "end": 283, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 565, |
| "end": 573, |
| "text": "Figure 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Modelling", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "There are three general categories of sentiment analysis: dictionary based methods (DBMs), supervised learning methods, and unsupervised learning methods (Reagan et al., 2017) . We focus on DBMs as they can be applied to corpora where there is no previous known information regarding the sentiment. DBMs compare the terms within the corpus with a dictionary of terms with known sentiment values. Let f T (w) be the frequency of word w in text T , and s D (w) be the sentiment of word w in dictionary D, then the average sentiment of the text is given by (Reagan et al., 2017)", |
| "cite_spans": [ |
| { |
| "start": 154, |
| "end": 175, |
| "text": "(Reagan et al., 2017)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "s T D = w\u2208D s D (w)f T (w) w\u2208D f T (w)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": ".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "( 1)For our analysis we tested the following dictionaries: AFINN, ANEW, Hului, Loughran-Mcdonald, NRC, SenticNet, SentiWordNet, and Syuzhet. These dictionaries primarily come from the lexicon package (Rinker, 2018) in R. The two dictionaries not available through this package are AFINN, which was accessed using the tidytext package (Silge and Robinson, 2006) , and ANEW which was obtained from Andrew Reagan's GitHub folder: https://github.com/andyreagan/labMT-simple/ tree/master/labMTsimple/data/ANEW. We can consider the percentage of unique words in our diaries which appear in the sentiment dictionaries, and compare this to the Brown Corpus, a standard corpus in NLP analysis. The Brown Corpus contains 1,006,770 words, including 45,215 unique words, from a collection of documents printed in the United States in 1961 (Francis and Kucera, 1971) . The words contained in the Brown Corpus were obtained using the zipfR package (Evert and Baroni, 2007) . Table 2 gives the number of words and possible sentiment values each dictionary has as well as the percentage of unique words in our diaries and the Brown Corpus which appear in the dictionaries. We note that approximately twice as many unique words from the Brown Corpus are covered by these dictionaries. This is despite the fact that our diary corpus contains more unique words (84,955 words) than the Brown corpus does. This is likely because none of these dictionaries were created for wartime text. Table 2 : The number of words and possible sentiment values in each of the eight sentiment dictionaries, as well as the percentage of unique words in the diaries and the Brown Corpus which appear in each dictionary. We can see that SenticNet provides the broadest coverage.", |
| "cite_spans": [ |
| { |
| "start": 200, |
| "end": 214, |
| "text": "(Rinker, 2018)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 334, |
| "end": 360, |
| "text": "(Silge and Robinson, 2006)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 827, |
| "end": 853, |
| "text": "(Francis and Kucera, 1971)", |
| "ref_id": null |
| }, |
| { |
| "start": 934, |
| "end": 958, |
| "text": "(Evert and Baroni, 2007)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 961, |
| "end": 968, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 1466, |
| "end": 1473, |
| "text": "Table 2", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "In order to compare the results from different dictionaries they are required to be on the same rating scale. As five of the dictionaries are already on the scale (\u22121, 1) we chose to convert the others to this. AFINN and ANEW were converted to this scale using the formula:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "x new = max new \u2212 min new max old \u2212 min old (x old \u2212 min old ) + min new ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "where x old and x new are the old and new value, respectively, [min old , max old ] is the old value range, and [min new , max new ] is the new value range. The Huliu lexicon was converted to the range (\u22121, 1) by converting any word with a sentiment score of -2 to a score of -1. This could be done as a score of -2 was given to phrases that are always negative, e.g., \"too much fun\" (Rinker, 2019) . For both topic modelling and sentiment analysis we considered a \"document\" to be all of the diary entries written in a particular month.", |
| "cite_spans": [ |
| { |
| "start": 384, |
| "end": 398, |
| "text": "(Rinker, 2019)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "When graphing our results we have applied a rolling mean using the rollmean function in the zoo package (Zeileis and Grothendieck, 2005) in R, with a rolling window of k = 5, in order to smooth out noise in the data. Due to the lack of data in 1923, as seen in Figure 1 , it is not possible to calculate this rolling mean and hence, results for this year are not included.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 136, |
| "text": "(Zeileis and Grothendieck, 2005)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 261, |
| "end": 269, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "The most probable words for each of our 10 topics are shown in Appendix A. Based on the most probable words, we selected names for each of our topics, hence our topics are: Everyday Life, War at Sea, Egypt, Gallipoli, In the Trenches (Beginning), In the Trenches (Middle), In the Trenches (End), White Christmas, After the Armistice, and Home Again. Note, the most probable words in all three In the Trenches topics are regarding battles, the Western Front and the Middle East. Hence, we differentiate these topics as beginning, middle and end, based on where they peak in Figure 3 . The proportion of each of these topics is shown as a function of time in Figure 3 . Figure 3 : The proportion of each topic obtained from our LDA model, over time. Note that a rolling mean with k = 5 has been applied to each point.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 573, |
| "end": 581, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 657, |
| "end": 665, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 668, |
| "end": 676, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Modelling", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Based on the most probable words as well as when the topics peak in Figure 3 , several of these topics relate to specific developments of the war. War at Sea corresponds to the Australian occupation of German New Guinea and the sinking of the German raider the Emden. Egypt corresponds to the training of Australian troops on the outskirts of Cairo and battles around Egypt and the Suez Canal. Gallipoli corresponds to the Gallipoli Campaign, which for many Australian soldiers was their first experience in battle. The three In the Trenches topics cover the period when Australians were fighting on the Western Front and in the Middle East. The peaks in these three topics most likely correspond to specific battles such as the Battle of Romani (August 1916), the Second Battle of Arras (April-May 1917), the Battle of Jerusalem (November-December 1917) , and the Battle of Hamel (July 1918) . After the Armistice corresponds to the period after the armistice was signed in November 1918 that Australian soldiers had to wait before being sent home. For some soldiers it took up to a year to be repatriated and in this time they travelled around France and Britain as well as receiving vocational training from the AIF (Australian Imperial Force) (DVA, 2020). We also have two more general topics. Everyday Life is consistently the most prominent topic until December 1919. This topic includes words related to everyday things such as the time of day and meals. This shows that whilst the diarists did write about war related things, such as training and battles, they primarily focused on their ordinary day-to-day activities. After 1919 the Home Again topic becomes most prominent. This is expected as this topic contains words related to being back in Australia, such as \"mum\", \"dad\" and \"shopping\", and corresponds to when the soldiers would have returned home. 
Figure 4 gives the sentiment scores for our diaries over time for the eight sentiment dictionaries we considered as well as the average over these dictionaries. From this graph we first note that five of the dictionaries: AFINN, Huliu, Loughran-Mcdonald, NRC, and Syuzhet, follow the same general pattern. Further, SenticNet and SentiWordNet have a similar trend. Based on Table 2 we know that ANEW covers the least amount of words in our corpus, whilst SenticNet and SentiWordNet cover the most. This shows that our analysis is dependent on the words covered in the dictionaries. We also observe more variability in our sentiment scores in the first half of 1914 and from 1920 onwards. This would be due to only having a small amount of data for those periods as seen in Figure 1 .", |
| "cite_spans": [ |
| { |
| "start": 830, |
| "end": 854, |
| "text": "(November-December 1917)", |
| "ref_id": null |
| }, |
| { |
| "start": 881, |
| "end": 892, |
| "text": "(July 1918)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 68, |
| "end": 76, |
| "text": "Figure 3", |
| "ref_id": null |
| }, |
| { |
| "start": 1866, |
| "end": 1874, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 2239, |
| "end": 2246, |
| "text": "Table 2", |
| "ref_id": null |
| }, |
| { |
| "start": 2638, |
| "end": 2646, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Modelling", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In the next section we compare our average sentiment curve with our topic model to understand why the sentiment peaks and dips at certain times. Figure 4 : Sentiment scores over time for the eight dictionaries: AFINN, ANEW, Huliu, Loughran-Mcdonald, NRC, SenticNet, SentiWordNet, and Syuzhet, as well as the average of these dictionaries. Note, that before graphing we have applied a rolling mean, with k = 5, to each of the dictionaries.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 145, |
| "end": 153, |
| "text": "Figure 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Sentiment Analysis", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The average sentiment curve shown in Figure 4 has several peaks and dips in sentiment. We investigate what these correspond to by comparing our sentiment with our topic model. Due to the variability in individual sentiment dictionaries prior to August 1914 and after December 1919 we do not consider these periods. Further, we exclude the Everyday Life and Home Again topics as they are prominent over large periods of time and hence are not likely to contribute to particular peaks and dips in sentiment. Figure 5 gives the comparison between topic probabilities and average sentiment scores. In Figure 5 we note there are peaks in sentiment corresponding to peaks in the Egypt and After the Armistice topics, whilst there are dips in sentiment corresponding to the peaks in the Gallipoli and White Christmas topics. When arriving in Egypt for training, the soldiers would most likely have been excited about being in a new country and be keen to prove themselves in battle. This, combined with the fact that whilst in Egypt the men were able to take small trips into Cairo and around the pyramids, would lead to a more positive sentiment for that period. Contrary to this, the Gallipoli campaign would have been the first battle experience for many of the soldiers leading to a more negative sentiment. Thomas Munro writes:", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 37, |
| "end": 45, |
| "text": "Figure 4", |
| "ref_id": null |
| }, |
| { |
| "start": 506, |
| "end": 514, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 597, |
| "end": 605, |
| "text": "Figure 5", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\"It is an awful sight to see the dead and wounded on both sides, lying out and being walked on, no possibilite [sic] to bring them in or bury them, Some of our men have been out there a month and are still there. The stench would knock you down.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "One of the top 40 most probable words in the White Christmas topic is \"miserable\", suggesting that the cold weather lead to many having a more negative sentiment. Through close reading of the diaries over the months surrounding January 1917 we find several negative comments regarding the cold and wet weather. For instance, Langford Colley-Priest writes \"Raining heavily all day which made the conditions more miserable. The mud & slush is terrible.\"", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
"text": "Further, whilst some men had a good Christmas, others didn't. The contrast between these Christmases is seen in the following quotes:",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "\"Christmas dinner and tea were very merry, the rations being supplemented by a lot of luxuries ... also by plum pudding ... \", Hector McLean \"Cold, miserable & hungry, we filed up to the cook house for our \"Christmas dinner\" of Bully beef Stew and buscuits [sic] , as our rations were not yet to hand and our Christmas comforts were delayed somewhere.\", Tom Taylor", |
| "cite_spans": [ |
| { |
| "start": 257, |
| "end": 262, |
| "text": "[sic]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
"text": "It is not surprising that the sentiment rose after the armistice was signed. This rise in sentiment is further strengthened by the fact that whilst waiting to be repatriated back to Australia soldiers spent their time travelling around France and Britain, and attending sport matches and plays (DVA, 2020). Overall, our average sentiment during the war is always slightly positive which contradicts the typical perception of the war as a horrific experience. This is most likely because the diarists predominantly wrote about everyday activities, which unlike battles, are not necessarily negative.",
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Modelling and Sentiment Analysis", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "This research aimed to analyse Australian WW1 diaries in order to determine what the soldiers wrote about and how they felt over the course of the war. Through the application of distant reading techniques we have seen that we can analyse large amounts of data to determine trends. Interestingly, while many people typically think of the war as a horrific experience we find that the diarists primarily wrote about their day-to-day activities. As such the diaries had an overall slightly positive sentiment, which is consistent with the positivity bias seen across human languages (Dodds et al., 2015) , but is surprising for this particular corpus.", |
| "cite_spans": [ |
| { |
| "start": 581, |
| "end": 601, |
| "text": "(Dodds et al., 2015)", |
| "ref_id": "BIBREF9" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We focused on DBMs for sentiment, and found that the dictionaries used covered less of the diaries than standard texts such as the Brown Corpus. This suggests that DBMs may not be the most accurate method for determining sentiment in WW1 diaries and as such in the future we will investigate other sentiment analysis techniques such as embedding-based methods to determine if they are more applicable. Further, in the future we will write a paper detailing the difficulties with date extraction, as well as our approach and the accuracy of our method, as this is not a trivial issue. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We acknowledge the State Library of New South Wales for providing the data which made this research possible.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| }, |
| { |
| "text": "Tables 3 -12 give the 54 most probable words for each of the topics found using topic modelling.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Appendix A Topics", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Computational Analysis of Medieval Manuscripts: A New Tool for Analysis and Mapping of Medieval Documents to Modern Orthography", |
| "authors": [ |
| { |
| "first": "Mushtaq", |
| "middle": [], |
| "last": "Ahmad", |
| "suffix": "" |
| }, |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Gruner", |
| "suffix": "" |
| }, |
| { |
| "first": "Muhammed Tanvir", |
| "middle": [], |
| "last": "Afzal", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Universal Computer Science", |
| "volume": "18", |
| "issue": "20", |
| "pages": "2750--2770", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mushtaq Ahmad, Stefan Gruner, and Muhammed Tanvir Afzal. 2012. Computational Analysis of Medieval Manuscripts: A New Tool for Analysis and Mapping of Medieval Documents to Modern Orthography. Journal of Universal Computer Science, 18(20):2750-2770.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "On Finding the Natural Number of Topics with Latent Dirichlet Allocation: Some Observations", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "R Arun", |
| "suffix": "" |
| }, |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Suresh", |
| "suffix": "" |
| }, |
| { |
| "first": "M Narasimha", |
| "middle": [], |
| "last": "Veni Madhavan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Murty", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Advances in Knowledge Discovery and Data Mining, Part I", |
| "volume": "", |
| "issue": "", |
| "pages": "391--402", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "R Arun, V Suresh, C.E Veni Madhavan, and M Narasimha Murty. 2010. On Finding the Natural Number of Topics with Latent Dirichlet Allocation: Some Observations. In M.J. Zaki, J.X. Yu, B Ravindran, and V Pudi, editors, Advances in Knowledge Discovery and Data Mining, Part I, pages 391 -402. Springer, Berlin. Heidelberg.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Latent Dirichlet Allocation", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "Y", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael I Jordan", |
| "middle": [], |
| "last": "Ng", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "993--1022", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David M Blei, Andrew Y Ng, and Michael I Jordan. 2003. Latent Dirichlet Allocation. Journal of Machine Learning Research, 3:993-1022.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Computational Analysis of Historical Documents: An Application to Italian War Bulletins in World War I and II", |
| "authors": [ |
| { |
| "first": "Federico", |
| "middle": [], |
| "last": "Boschetti", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrea", |
| "middle": [], |
| "last": "Cimino", |
| "suffix": "" |
| }, |
| { |
| "first": "Felice", |
| "middle": [], |
| "last": "Dell'orletta", |
| "suffix": "" |
| }, |
| { |
| "first": "E", |
| "middle": [], |
| "last": "Gianluca", |
| "suffix": "" |
| }, |
| { |
| "first": "Lucia", |
| "middle": [], |
| "last": "Lebani", |
| "suffix": "" |
| }, |
| { |
| "first": "Paolo", |
| "middle": [], |
| "last": "Passaro", |
| "suffix": "" |
| }, |
| { |
| "first": "Giulia", |
| "middle": [], |
| "last": "Picchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Simonetta", |
| "middle": [], |
| "last": "Venturi", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Montemagni", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lenci", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Federico Boschetti, Andrea Cimino, Felice Dell'orletta, Gianluca E Lebani, Lucia Passaro, Paolo Picchi, Giulia Venturi, Simonetta Montemagni, and Alessandro Lenci. 2014. Computational Analysis of Historical Docu- ments: An Application to Italian War Bulletins in World War I and II.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Toward Multimodal Sentiment Analysis of Historic Plays: A Case Study with Text and Audio for Lessing's Emilia Galotti", |
| "authors": [ |
| { |
| "first": "Manuel", |
| "middle": [], |
| "last": "Burghardt", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Wolff", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Schmidt", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "4th Conference of the Association Digital Humanities in the Nordic Countries", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Manuel Burghardt, Christian Wolff, and Thomas Schmidt. 2019. Toward Multimodal Sentiment Analysis of Historic Plays: A Case Study with Text and Audio for Lessing's Emilia Galotti. In 4th Conference of the Association Digital Humanities in the Nordic Countries, Copenhagen.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "A density-based method for adaptive LDA model selection", |
| "authors": [ |
| { |
| "first": "Juan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Tian", |
| "middle": [], |
| "last": "Xia", |
| "suffix": "" |
| }, |
| { |
| "first": "Jintao", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Yongdong", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Sheng", |
| "middle": [], |
| "last": "Tang", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Neurocomputing", |
| "volume": "72", |
| "issue": "7-9", |
| "pages": "1775--1781", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Juan Cao, Tian Xia, Jintao Li, Yongdong Zhang, and Sheng Tang. 2008. A density-based method for adaptive LDA model selection. Neurocomputing, 72(7-9):1775 -1781.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The unknown Anzacs : the real stories of our national legend : told through the rediscovered diaries and letters of the Anzacs who were there", |
| "authors": [ |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Caulfield", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael Caulfield. 2013. The unknown Anzacs : the real stories of our national legend : told through the rediscovered diaries and letters of the Anzacs who were there. Hachette Australia, Sydney.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Diamonds of the Dustheap': Diaries from the First World War. Humanities Australia: The Journal of the Australian Academy of the Humanities", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Cochrane", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "22--33", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Cochrane. 2015. 'Diamonds of the Dustheap': Diaries from the First World War. Humanities Australia: The Journal of the Australian Academy of the Humanities, (6):22 -33.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Accurate and effective Latent Concept Modeling for ad hoc information retrieval", |
| "authors": [ |
| { |
| "first": "Romain", |
| "middle": [], |
| "last": "Deveaud", |
| "suffix": "" |
| }, |
| { |
| "first": "Eric", |
| "middle": [], |
| "last": "Sanjuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Patrice", |
| "middle": [], |
| "last": "Bellot", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Document Numerique", |
| "volume": "17", |
| "issue": "1", |
| "pages": "61--84", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Romain Deveaud, Eric SanJuan, and Patrice Bellot. 2014. Accurate and effective Latent Concept Modeling for ad hoc information retrieval. Document Numerique, 17(1):61-84.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Human language reveals a universal positivity bias", |
| "authors": [ |
| { |
| "first": "Eric", |
| "middle": [ |
| "M" |
| ], |
| "last": "Peter Sheridan Dodds", |
| "suffix": "" |
| }, |
| { |
| "first": "Suma", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Desu", |
| "suffix": "" |
| }, |
| { |
| "first": "R", |
| "middle": [], |
| "last": "Morgan", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "J" |
| ], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Jake", |
| "middle": [ |
| "Ryland" |
| ], |
| "last": "Reagan", |
| "suffix": "" |
| }, |
| { |
| "first": "Lewis", |
| "middle": [], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "Kameron Decker", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Isabel", |
| "middle": [ |
| "M" |
| ], |
| "last": "Harris", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [ |
| "P" |
| ], |
| "last": "Kloumann", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Bagrow", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the National Academy of Sciences", |
| "volume": "112", |
| "issue": "8", |
| "pages": "2389--2394", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Sheridan Dodds, Eric M Clark, Suma Desu, Morgan R Frank, Andrew J Reagan, Jake Ryland Williams, Lewis Mitchell, Kameron Decker Harris, Isabel M Kloumann, James P Bagrow, et al. 2015. Human language reveals a universal positivity bias. Proceedings of the National Academy of Sciences, 112(8):2389-2394.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Repatriation of Australians in World War I. DVA (Department of Veterans' Affairs) Anzac Portal", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "DVA. 2020. Repatriation of Australians in World War I. DVA (Department of Veterans' Affairs) Anzac Portal.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "zipfR: Word Frequency Distributions in R", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Evert", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics, Posters and Demonstrations Sessions", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Evert and Marco Baroni. 2007. zipfR: Word Frequency Distributions in R. In Proceedings of the 45th Annual Meeting of the Association for Computational Linguistics, Posters and Demonstrations Sessions, Prague.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Finding scientific topics", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the National Academy of Sciences of the United States of America", |
| "volume": "101", |
| "issue": "", |
| "pages": "5228--5235", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thomas L Griffiths and Mark Steyvers. 2004. Finding scientific topics. Proceedings of the National Academy of Sciences of the United States of America, 101:5228-5235.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "topicmodels: An R Package for Fitting Topic Models", |
| "authors": [ |
| { |
| "first": "Bettina", |
| "middle": [], |
| "last": "Gr\u00fcn", |
| "suffix": "" |
| }, |
| { |
| "first": "Kurt", |
| "middle": [], |
| "last": "Hornik", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Journal of Statistical Software", |
| "volume": "40", |
| "issue": "13", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bettina Gr\u00fcn and Kurt Hornik. 2011. topicmodels: An R Package for Fitting Topic Models. Journal of Statistical Software, 40(13):1-30.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "On Close and Distant Reading in Digital Humanities: A Survey and Future Challenges", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "S J\u00e4nicke", |
| "suffix": "" |
| }, |
| { |
| "first": "M F", |
| "middle": [], |
| "last": "Franzini", |
| "suffix": "" |
| }, |
| { |
| "first": "G", |
| "middle": [], |
| "last": "Cheema", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Scheuermann", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Eurographics Conference on Visualization (EuroVis)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "S J\u00e4nicke, G Franzini, M F Cheema, and G Scheuermann. 2015. On Close and Distant Reading in Digital Humanities: A Survey and Future Challenges. In R Borgo, F Ganovelli, and I Viola, editors, Eurographics Conference on Visualization (EuroVis).", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "ldatuning: Tuning of the Latent Dirichlet Allocation Models Parameters", |
| "authors": [ |
| { |
| "first": "Murzintcev", |
| "middle": [], |
| "last": "Nikita", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Murzintcev Nikita. 2020. ldatuning: Tuning of the Latent Dirichlet Allocation Models Parameters.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Opinion mining and sentiment analysis. Foundations and Trends in Information Retrieval", |
| "authors": [ |
| { |
| "first": "Bo", |
| "middle": [], |
| "last": "Pang", |
| "suffix": "" |
| }, |
| { |
| "first": "Lillian", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "2", |
| "issue": "", |
| "pages": "1--135", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bo Pang and Lillian Lee. 2008. Opinion mining and sentiment analysis. Foundations and Trends in Information Retrieval, 2(2):1-135.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Sentiment analysis methods for understanding large-scale texts: a case for using continuum-scored words and word shift graphs", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Reagan", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Danforth", |
| "suffix": "" |
| }, |
| { |
| "first": "Jake", |
| "middle": [ |
| "Ryland" |
| ], |
| "last": "Tivnan", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "Sheridan" |
| ], |
| "last": "Williams", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Dodds", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "EPJ Data Science", |
| "volume": "6", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew J Reagan, Christopher M Danforth, Brian Tivnan, Jake Ryland Williams, and Peter Sheridan Dodds. 2017. Sentiment analysis methods for understanding large-scale texts: a case for using continuum-scored words and word shift graphs. EPJ Data Science, 6(1).", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "lexicon: Lexicon Data", |
| "authors": [ |
| { |
| "first": "Tyler", |
| "middle": [], |
| "last": "Rinker", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tyler Rinker. 2018. lexicon: Lexicon Data.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "tidytext: Text Mining and Analysis Using Tidy Data Principles in R", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Silge", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Robinson", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "JOSS", |
| "volume": "", |
| "issue": "3", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Silge and David Robinson. 2006. tidytext: Text Mining and Analysis Using Tidy Data Principles in R. JOSS, 1(3).", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Personal diaries and letters from the First World War", |
| "authors": [], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "State Library of New South Wales. 2019. Personal diaries and letters from the First World War.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "State Library of New South Wales. 2020. Diarists from World War I", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "State Library of New South Wales. 2020. Diarists from World War I.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Probabilistic Topic Models", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Steyvers", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Griffiths", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Handbook of latent semantic analysis", |
| "volume": "", |
| "issue": "", |
| "pages": "427--448", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Steyvers and Tom Griffiths. 2007. Probabilistic Topic Models. In T Landauer, D McNamara, S Dennis, and W Kintsch, editors, Handbook of latent semantic analysis, pages 427-448. Lawrence Erlbaum Associates Publishers.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Sentiment Analysis: An Overview from Linguistics", |
| "authors": [ |
| { |
| "first": "Maite", |
| "middle": [], |
| "last": "Taboada", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Annual Review of Linguistics", |
| "volume": "2", |
| "issue": "1", |
| "pages": "325--347", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maite Taboada. 2016. Sentiment Analysis: An Overview from Linguistics. Annual Review of Linguistics, 2(1):325-347, 1.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "zoo: S3 Infrastructure for Regular and Irregular Time Series", |
| "authors": [ |
| { |
| "first": "Achim", |
| "middle": [], |
| "last": "Zeileis", |
| "suffix": "" |
| }, |
| { |
| "first": "Gabor", |
| "middle": [], |
| "last": "Grothendieck", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Journal of Statistical Software", |
| "volume": "14", |
| "issue": "6", |
| "pages": "1--27", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Achim Zeileis and Gabor Grothendieck. 2005. zoo: S3 Infrastructure for Regular and Irregular Time Series. Journal of Statistical Software, 14(6):1-27.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "text": "Number of words written in our entire diary collection per month. The majority of entries are written between August 1914 and December 1919, however there were some entries as late as 1923.", |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "text": "Average sentiment analysis scores compared to the topic probabilities (except the Everyday Life and Home Again topics).", |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF7": { |
| "text": "Top 54 terms for the Everyday Life topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
"content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 ship</td><td>0.0157</td><td>19 port</td><td>0.0043</td><td>37 ashore</td><td>0.0024</td></tr><tr><td>2 sydney</td><td>0.0092</td><td>20 deck</td><td>0.0040</td><td>38 drill</td><td>0.0024</td></tr><tr><td>3 german</td><td>0.0088</td><td>21 harbour</td><td>0.0040</td><td>39 crew</td><td>0.0024</td></tr><tr><td>4 captain</td><td>0.0074</td><td>22 emden</td><td>0.0038</td><td>40 flag</td><td>0.0023</td></tr><tr><td>5 officer</td><td>0.0073</td><td>23 naval</td><td>0.0035</td><td colspan=\"2\">41 herbertshohe 0.0023</td></tr><tr><td>6 boat</td><td>0.0073</td><td colspan=\"2\">24 administrator 0.0034</td><td>42 colombo</td><td>0.0023</td></tr><tr><td>7 board</td><td>0.0067</td><td>25 force</td><td>0.0034</td><td>43 commander</td><td>0.0023</td></tr><tr><td colspan=\"2\">8 lieutenant 0.0065</td><td>26 horse</td><td>0.0033</td><td>44 australia</td><td>0.0023</td></tr><tr><td>9 island</td><td>0.0064</td><td>27 melbourne</td><td>0.0031</td><td>45 holme</td><td>0.0022</td></tr><tr><td>10 troop</td><td>0.0058</td><td>28 major</td><td>0.0030</td><td>46 returned</td><td>0.0022</td></tr><tr><td>11 native</td><td>0.0056</td><td>29 government</td><td>0.0029</td><td>47 brigadier</td><td>0.0021</td></tr><tr><td>12 colonel</td><td>0.0050</td><td>30 cruiser</td><td>0.0027</td><td>48 sight</td><td>0.0020</td></tr><tr><td>13 wireless</td><td>0.0049</td><td>31 station</td><td>0.0027</td><td>49 convoy</td><td>0.0020</td></tr><tr><td>14 message</td><td>0.0048</td><td>32 fleet</td><td>0.0027</td><td>50 military</td><td>0.0020</td></tr><tr><td colspan=\"2\">15 company 0.0045</td><td>33 garrison</td><td>0.0027</td><td>51 signal</td><td>0.0020</td></tr><tr><td>16 rabaul</td><td>0.0045</td><td>34 steamer</td><td>0.0026</td><td>52 british</td><td>0.0020</td></tr><tr><td>17 received</td><td>0.0044</td><td>35 berrima</td><td>0.0025</td><td>53 war</td><td>0.0019</td></tr><tr><td>18 sea</td><td>0.0044</td><td>36 gun</td><td>0.0025</td><td>54 prisoner</td><td>0.0019</td></tr></table>"
| }, |
| "TABREF8": { |
| "text": "Top 54 terms for the War at Sea topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
"content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 cairo</td><td>0.0170</td><td>19 water</td><td>0.0040</td><td colspan=\"2\">37 serapeum 0.0022</td></tr><tr><td>2 canal</td><td>0.0131</td><td>20 troop</td><td>0.0039</td><td>38 oclock</td><td>0.0022</td></tr><tr><td>3 camp</td><td>0.0103</td><td>21 kebir</td><td>0.0039</td><td>39 trench</td><td>0.0022</td></tr><tr><td>4 horse</td><td>0.0098</td><td colspan=\"2\">22 regiment 0.0039</td><td colspan=\"2\">40 squadron 0.0021</td></tr><tr><td>5 parade</td><td>0.0097</td><td>23 train</td><td>0.0039</td><td>41 piastre</td><td>0.0021</td></tr><tr><td>6 ship</td><td>0.0093</td><td>24 sea</td><td>0.0038</td><td>42 maadi</td><td>0.0021</td></tr><tr><td>7 sand</td><td>0.0088</td><td>25 suez</td><td>0.0036</td><td>43 arab</td><td>0.0021</td></tr><tr><td>8 tent</td><td>0.0080</td><td>26 deck</td><td>0.0035</td><td>44 colombo</td><td>0.0021</td></tr><tr><td>9 desert</td><td>0.0074</td><td colspan=\"2\">27 heliopoli 0.0035</td><td>45 soldier</td><td>0.0020</td></tr><tr><td>10 native</td><td>0.0071</td><td>28 sydney</td><td>0.0033</td><td>46 colonel</td><td>0.0020</td></tr><tr><td>11 el</td><td>0.0066</td><td colspan=\"2\">29 pyramid 0.0033</td><td>47 mosque</td><td>0.0020</td></tr><tr><td>12 drill</td><td>0.0060</td><td>30 island</td><td>0.0030</td><td>48 infantry</td><td>0.0020</td></tr><tr><td>13 egypt</td><td>0.0049</td><td>31 harbour</td><td>0.0028</td><td colspan=\"2\">49 christmas 0.0020</td></tr><tr><td>14 boat</td><td>0.0046</td><td>32 hot</td><td>0.0027</td><td>50 wharf</td><td>0.0019</td></tr><tr><td>15 egyptian</td><td>0.0045</td><td>33 ashore</td><td>0.0027</td><td>51 fuller</td><td>0.0019</td></tr><tr><td>16 tel</td><td>0.0043</td><td>34 nile</td><td>0.0027</td><td colspan=\"2\">52 signalling 0.0019</td></tr><tr><td>17 camel</td><td>0.0043</td><td colspan=\"2\">35 ismailia 0.0024</td><td colspan=\"2\">53 marching 0.0019</td></tr><tr><td colspan=\"2\">18 alexandria 0.0040</td><td>36 port</td><td>0.0023</td><td>54 fatigue</td><td>0.0019</td></tr></table>"
| }, |
| "TABREF9": { |
| "text": "Top 54 terms for the Egypt topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
"content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 turk</td><td>0.0194</td><td>19 hospital</td><td>0.0048</td><td>37 island</td><td>0.0029</td></tr><tr><td>2 trench</td><td>0.0188</td><td>20 turkish</td><td>0.0046</td><td>38 dug</td><td>0.0029</td></tr><tr><td>3 gun</td><td>0.0130</td><td>21 quiet</td><td>0.0046</td><td colspan=\"2\">39 alexandria 0.0028</td></tr><tr><td>4 shell</td><td>0.0120</td><td>22 hill</td><td>0.0041</td><td>40 landed</td><td>0.0028</td></tr><tr><td colspan=\"2\">5 wounded 0.0101</td><td>23 rifle</td><td>0.0041</td><td>41 hit</td><td>0.0028</td></tr><tr><td>6 ship</td><td>0.0093</td><td>24 cairo</td><td>0.0041</td><td colspan=\"2\">42 aeroplane 0.0028</td></tr><tr><td>7 fire</td><td>0.0085</td><td>25 killed</td><td>0.0040</td><td>43 fired</td><td>0.0028</td></tr><tr><td>8 enemy</td><td>0.0081</td><td>26 line</td><td>0.0039</td><td>44 anzac</td><td>0.0027</td></tr><tr><td>9 firing</td><td>0.0076</td><td>27 shot</td><td>0.0038</td><td>45 machine</td><td>0.0027</td></tr><tr><td>10 beach</td><td>0.0072</td><td>28 bullet</td><td>0.0037</td><td>46 warship</td><td>0.0027</td></tr><tr><td>11 boat</td><td>0.0065</td><td colspan=\"2\">29 bombardment 0.0035</td><td>47 sniper</td><td>0.0026</td></tr><tr><td>12 position</td><td>0.0064</td><td>30 ashore</td><td>0.0034</td><td>48 pm</td><td>0.0026</td></tr><tr><td colspan=\"2\">13 shrapnel 0.0059</td><td>31 heavy</td><td>0.0034</td><td>49 casualty</td><td>0.0026</td></tr><tr><td>14 attack</td><td>0.0057</td><td>32 water</td><td>0.0032</td><td>50 damage</td><td>0.0025</td></tr><tr><td>15 bomb</td><td>0.0055</td><td>33 landing</td><td>0.0032</td><td>51 harbour</td><td>0.0025</td></tr><tr><td>16 battery</td><td>0.0053</td><td>34 gully</td><td>0.0032</td><td>52 board</td><td>0.0024</td></tr><tr><td>17 sea</td><td>0.0049</td><td>35 troop</td><td>0.0032</td><td>53 aboard</td><td>0.0023</td></tr><tr><td>18 artillery</td><td>0.0049</td><td>36 lemno</td><td>0.0030</td><td>54 dead</td><td>0.0023</td></tr></table>"
| }, |
| "TABREF10": { |
| "text": "Top 54 terms for the Gallipoli topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 shell</td><td>0.0153</td><td colspan=\"2\">19 bombardment 0.0052</td><td>37 fire</td><td>0.0035</td></tr><tr><td>2 trench</td><td>0.0153</td><td>20 marched</td><td>0.0050</td><td>38 stunt</td><td>0.0034</td></tr><tr><td>3 gun</td><td>0.0138</td><td>21 firing</td><td>0.0048</td><td colspan=\"2\">39 evening 0.0033</td></tr><tr><td>4 line</td><td>0.0131</td><td>22 battery</td><td>0.0044</td><td>40 killed</td><td>0.0033</td></tr><tr><td>5 fritz</td><td>0.0096</td><td>23 enemy</td><td>0.0042</td><td>41 drill</td><td>0.0032</td></tr><tr><td>6 german</td><td>0.0076</td><td>24 battalion</td><td>0.0042</td><td colspan=\"2\">42 london 0.0032</td></tr><tr><td colspan=\"2\">7 wounded 0.0069</td><td>25 horse</td><td>0.0042</td><td>43 oclock</td><td>0.0032</td></tr><tr><td>8 artillery</td><td>0.0069</td><td>26 machine</td><td>0.0041</td><td colspan=\"2\">44 shelling 0.0031</td></tr><tr><td>9 front</td><td>0.0067</td><td>27 aeroplane</td><td>0.0041</td><td colspan=\"2\">45 fatigue 0.0031</td></tr><tr><td>10 billet</td><td>0.0066</td><td>28 division</td><td>0.0040</td><td>46 church</td><td>0.0031</td></tr><tr><td>11 gas</td><td>0.0065</td><td>29 casualty</td><td>0.0040</td><td>47 hut</td><td>0.0029</td></tr><tr><td>12 camp</td><td>0.0065</td><td>30 attack</td><td>0.0039</td><td>48 el</td><td>0.0029</td></tr><tr><td>13 bomb</td><td>0.0064</td><td>31 fine</td><td>0.0039</td><td>49 wet</td><td>0.0028</td></tr><tr><td>14 mile</td><td>0.0059</td><td>32 parade</td><td>0.0038</td><td colspan=\"2\">50 raining 0.0028</td></tr><tr><td>15 plane</td><td>0.0058</td><td>33 position</td><td>0.0037</td><td>51 wood</td><td>0.0028</td></tr><tr><td>16 village</td><td>0.0057</td><td>34 albert</td><td>0.0037</td><td>52 dug</td><td>0.0027</td></tr><tr><td>17 heavy</td><td>0.0053</td><td>35 france</td><td>0.0035</td><td>53 
moved</td><td>0.0027</td></tr><tr><td>18 road</td><td>0.0053</td><td>36 taube</td><td>0.0035</td><td colspan=\"2\">54 tommy 0.0027</td></tr></table>" |
| }, |
| "TABREF11": { |
| "text": "Top 54 terms for the In the Trenches (Beginning) topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 road</td><td>0.0067</td><td>19 camel</td><td>0.0032</td><td colspan=\"2\">37 weather 0.0025</td></tr><tr><td>2 wrote</td><td>0.0057</td><td>20 raid</td><td>0.0031</td><td colspan=\"2\">38 shelling 0.0023</td></tr><tr><td>3 fritz</td><td>0.0055</td><td>21 barrage</td><td>0.0031</td><td>39 ridge</td><td>0.0023</td></tr><tr><td>4 ypre</td><td>0.0053</td><td colspan=\"2\">22 boulogne 0.0031</td><td>40 station</td><td>0.0023</td></tr><tr><td>5 gun</td><td>0.0048</td><td colspan=\"2\">23 wounded 0.0030</td><td>41 omer</td><td>0.0023</td></tr><tr><td>6 fine</td><td>0.0047</td><td>24 london</td><td>0.0030</td><td>42 lovely</td><td>0.0023</td></tr><tr><td colspan=\"2\">7 enemy 0.0047</td><td>25 walked</td><td>0.0029</td><td colspan=\"2\">43 deferred 0.0023</td></tr><tr><td colspan=\"2\">8 brigade 0.0045</td><td>26 raining</td><td>0.0028</td><td>44 rain</td><td>0.0022</td></tr><tr><td>9 train</td><td>0.0045</td><td>27 letter</td><td>0.0028</td><td>45 report</td><td>0.0022</td></tr><tr><td>10 cold</td><td>0.0043</td><td>28 pt</td><td>0.0027</td><td>46 moved</td><td>0.0021</td></tr><tr><td>11 dinner</td><td>0.0041</td><td>29 sister</td><td>0.0027</td><td>47 stunt</td><td>0.0021</td></tr><tr><td>12 bomb</td><td>0.0040</td><td>30 paris</td><td>0.0027</td><td>48 book</td><td>0.0021</td></tr><tr><td>13 line</td><td>0.0039</td><td>31 plane</td><td>0.0026</td><td>49 battery</td><td>0.0021</td></tr><tr><td>14 hut</td><td>0.0038</td><td>32 farm</td><td>0.0026</td><td>50 dump</td><td>0.0020</td></tr><tr><td>15 lorry</td><td>0.0037</td><td>33 de</td><td>0.0026</td><td>51 lunch</td><td>0.0019</td></tr><tr><td colspan=\"2\">16 bailleul 0.0037</td><td>34 miss</td><td>0.0026</td><td colspan=\"2\">52 battalion 0.0019</td></tr><tr><td>17 shell</td><td>0.0034</td><td colspan=\"2\">35 machine 0.0025</td><td colspan=\"2\">53 division 
0.0019</td></tr><tr><td>18 fed</td><td>0.0033</td><td>36 messine</td><td>0.0025</td><td>54 le</td><td>0.0019</td></tr></table>" |
| }, |
| "TABREF12": { |
| "text": "Top 54 terms for the In the Trenches (Middle) topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 fritz</td><td>0.0144</td><td>19 battery</td><td>0.0045</td><td colspan=\"2\">37 captured 0.0027</td></tr><tr><td>2 gun</td><td>0.0128</td><td>20 amien</td><td>0.0043</td><td>38 forward</td><td>0.0026</td></tr><tr><td>3 line</td><td>0.0127</td><td>21 quiet</td><td>0.0042</td><td>39 move</td><td>0.0026</td></tr><tr><td>4 enemy</td><td>0.0106</td><td>22 moved</td><td>0.0042</td><td colspan=\"2\">40 american 0.0025</td></tr><tr><td>5 shell</td><td>0.0095</td><td>23 somme</td><td>0.0040</td><td>41 wood</td><td>0.0024</td></tr><tr><td>6 front</td><td>0.0078</td><td colspan=\"2\">24 evening 0.0038</td><td>42 shelled</td><td>0.0024</td></tr><tr><td>7 plane</td><td>0.0072</td><td>25 trench</td><td>0.0038</td><td>43 hot</td><td>0.0024</td></tr><tr><td>8 village</td><td>0.0068</td><td>26 stunt</td><td>0.0036</td><td>44 advance</td><td>0.0024</td></tr><tr><td>9 road</td><td>0.0065</td><td>27 gas</td><td>0.0036</td><td>45 tank</td><td>0.0024</td></tr><tr><td colspan=\"2\">10 battalion 0.0064</td><td colspan=\"2\">28 shelling 0.0034</td><td>46 dug</td><td>0.0023</td></tr><tr><td>11 hun</td><td>0.0059</td><td colspan=\"2\">29 machine 0.0032</td><td colspan=\"2\">47 casualty 0.0023</td></tr><tr><td colspan=\"2\">12 prisoner 0.0059</td><td>30 viller</td><td>0.0032</td><td>48 lorry</td><td>0.0023</td></tr><tr><td>13 bomb</td><td>0.0053</td><td>31 dugout</td><td>0.0031</td><td>49 valley</td><td>0.0023</td></tr><tr><td>14 division</td><td>0.0051</td><td>32 french</td><td>0.0030</td><td>50 aussie</td><td>0.0023</td></tr><tr><td colspan=\"2\">15 wounded 0.0047</td><td>33 heavy</td><td>0.0030</td><td>51 river</td><td>0.0022</td></tr><tr><td>16 position</td><td>0.0047</td><td colspan=\"2\">34 barrage 0.0029</td><td>52 dump</td><td>0.0022</td></tr><tr><td>17 fine</td><td>0.0046</td><td>35 le</td><td>0.0028</td><td>53 
night</td><td>0.0021</td></tr><tr><td>18 attack</td><td>0.0045</td><td>36 la</td><td>0.0027</td><td>54 kilo</td><td>0.0021</td></tr></table>" |
| }, |
| "TABREF13": { |
| "text": "Top 54 terms for the In the Trenches (End) topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 cold</td><td>0.0429</td><td>19 parcel</td><td>0.0033</td><td>37 foggy</td><td>0.0022</td></tr><tr><td>2 snow</td><td>0.0260</td><td>20 fler</td><td>0.0032</td><td>38 albert</td><td>0.0022</td></tr><tr><td>3 mud</td><td>0.0150</td><td>21 camel</td><td>0.0030</td><td>39 harness</td><td>0.0022</td></tr><tr><td colspan=\"2\">4 christmas 0.0140</td><td>22 stable</td><td>0.0029</td><td>40 thick</td><td>0.0022</td></tr><tr><td>5 hut</td><td>0.0075</td><td>23 rum</td><td>0.0029</td><td>41 ribemont</td><td>0.0022</td></tr><tr><td>6 frost</td><td>0.0073</td><td>24 ration</td><td>0.0028</td><td>42 patient</td><td>0.0021</td></tr><tr><td>7 frozen</td><td>0.0071</td><td>25 freezing</td><td>0.0028</td><td>43 delville</td><td>0.0020</td></tr><tr><td>8 el</td><td>0.0070</td><td colspan=\"2\">26 miserable 0.0026</td><td>44 thaw</td><td>0.0020</td></tr><tr><td>9 snowing</td><td>0.0064</td><td>27 frosty</td><td>0.0026</td><td>45 le</td><td>0.0020</td></tr><tr><td>10 fritz</td><td>0.0063</td><td>28 wind</td><td>0.0026</td><td>46 amien</td><td>0.0019</td></tr><tr><td>11 dugout</td><td>0.0060</td><td>29 rafa</td><td>0.0025</td><td>47 blighty</td><td>0.0019</td></tr><tr><td>12 arish</td><td>0.0055</td><td>30 desert</td><td>0.0024</td><td>48 bazentin</td><td>0.0018</td></tr><tr><td>13 wood</td><td>0.0053</td><td>31 taube</td><td>0.0024</td><td>49 hun</td><td>0.0018</td></tr><tr><td>14 ice</td><td>0.0052</td><td>32 mametz</td><td>0.0024</td><td>50 sleet</td><td>0.0018</td></tr><tr><td>15 foot</td><td>0.0051</td><td>33 walked</td><td>0.0024</td><td>51 needle</td><td>0.0017</td></tr><tr><td>16 blanket</td><td>0.0038</td><td>34 fricourt</td><td>0.0024</td><td>52 ground</td><td>0.0017</td></tr><tr><td>17 bitterly</td><td>0.0037</td><td>35 snowed</td><td>0.0023</td><td>53 cleaning</td><td>0.0017</td></tr><tr><td>18 muddy</td><td>0.0033</td><td>36 
dump</td><td>0.0022</td><td colspan=\"2\">54 headquater 0.0017</td></tr></table>" |
| }, |
| "TABREF14": { |
| "text": "Top 54 terms for the White Christmas topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 train</td><td>0.0084</td><td>19 car</td><td>0.0035</td><td>37 noon</td><td>0.0023</td></tr><tr><td>2 boat</td><td>0.0069</td><td>20 met</td><td>0.0034</td><td>38 ashore</td><td>0.0023</td></tr><tr><td>3 ship</td><td>0.0069</td><td>21 troop</td><td>0.0034</td><td>39 city</td><td>0.0023</td></tr><tr><td>4 fine</td><td>0.0065</td><td>22 person</td><td>0.0032</td><td>40 cold</td><td>0.0022</td></tr><tr><td>5 town</td><td>0.0060</td><td>23 walked</td><td>0.0031</td><td>41 picture</td><td>0.0022</td></tr><tr><td>6 sea</td><td>0.0053</td><td>24 lunch</td><td>0.0031</td><td>42 le</td><td>0.0021</td></tr><tr><td colspan=\"2\">7 london 0.0050</td><td>25 bed</td><td>0.0030</td><td>43 passed</td><td>0.0021</td></tr><tr><td colspan=\"2\">8 evening 0.0049</td><td>26 house</td><td>0.0029</td><td colspan=\"2\">44 germany 0.0021</td></tr><tr><td>9 home</td><td>0.0047</td><td>27 board</td><td>0.0029</td><td colspan=\"2\">45 charleroi 0.0020</td></tr><tr><td>10 hotel</td><td>0.0047</td><td>28 dance</td><td>0.0029</td><td>46 concert</td><td>0.0020</td></tr><tr><td>11 deck</td><td>0.0046</td><td>29 war</td><td>0.0029</td><td>47 snow</td><td>0.0020</td></tr><tr><td>12 pm</td><td>0.0043</td><td colspan=\"2\">30 afternoon 0.0029</td><td>48 class</td><td>0.0020</td></tr><tr><td>13 de</td><td>0.0043</td><td>31 street</td><td>0.0028</td><td>49 aboard</td><td>0.0020</td></tr><tr><td>14 dinner</td><td>0.0041</td><td>32 girl</td><td>0.0027</td><td colspan=\"2\">50 armistice 0.0020</td></tr><tr><td>15 port</td><td>0.0041</td><td>33 australia</td><td>0.0025</td><td>51 hut</td><td>0.0019</td></tr><tr><td>16 walk</td><td>0.0038</td><td>34 visited</td><td>0.0024</td><td>52 billet</td><td>0.0019</td></tr><tr><td>17 leave</td><td>0.0037</td><td>35 office</td><td>0.0024</td><td>53 lorry</td><td>0.0019</td></tr><tr><td>18 paris</td><td>0.0035</td><td>36 
aussie</td><td>0.0024</td><td>54 engine</td><td>0.0019</td></tr></table>" |
| }, |
| "TABREF15": { |
| "text": "Top 54 terms for the After the Armistice topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table><tr><td>rank term</td><td colspan=\"2\">beta rank term</td><td colspan=\"2\">beta rank term</td><td>beta</td></tr><tr><td>1 home</td><td>0.0300</td><td>19 sit</td><td>0.0044</td><td>37 train</td><td>0.0028</td></tr><tr><td>2 meet</td><td>0.0160</td><td>20 elli</td><td>0.0042</td><td>38 time</td><td>0.0026</td></tr><tr><td>3 boat</td><td>0.0126</td><td>21 tram</td><td>0.0041</td><td>39 card</td><td>0.0026</td></tr><tr><td>4 pm</td><td>0.0104</td><td>22 dick</td><td>0.0040</td><td>40 write</td><td>0.0025</td></tr><tr><td>5 tea</td><td>0.0098</td><td>23 miss</td><td>0.0040</td><td>41 arrive</td><td>0.0025</td></tr><tr><td>6 play</td><td>0.0092</td><td>24 tickle</td><td>0.0039</td><td>42 read</td><td>0.0022</td></tr><tr><td>7 ring</td><td>0.0086</td><td>25 wrote</td><td>0.0036</td><td>43 spend</td><td>0.0022</td></tr><tr><td>8 catch</td><td>0.0082</td><td>26 dine</td><td>0.0035</td><td>44 night</td><td>0.0021</td></tr><tr><td>9 bed</td><td>0.0070</td><td>27 drive</td><td>0.0035</td><td>45 chat</td><td>0.0021</td></tr><tr><td>10 manly</td><td>0.0063</td><td>28 roy</td><td>0.0034</td><td>46 dinner</td><td>0.0021</td></tr><tr><td>11 mum</td><td>0.0058</td><td>29 day</td><td>0.0033</td><td>47 visit</td><td>0.0020</td></tr><tr><td>12 dad</td><td>0.0054</td><td>30 piano</td><td>0.0033</td><td>48 sleep</td><td>0.0020</td></tr><tr><td>13 walk</td><td>0.0052</td><td colspan=\"2\">31 middle 0.0032</td><td>49 cut</td><td>0.0019</td></tr><tr><td colspan=\"2\">14 paddock 0.0050</td><td>32 talk</td><td>0.0031</td><td>50 lopped</td><td>0.0019</td></tr><tr><td>15 town</td><td>0.0049</td><td>33 otto</td><td>0.0030</td><td colspan=\"2\">51 meeting 0.0019</td></tr><tr><td>16 music</td><td>0.0046</td><td>34 swim</td><td>0.0029</td><td>52 dave</td><td>0.0019</td></tr><tr><td>17 garden</td><td>0.0045</td><td>35 rain</td><td>0.0029</td><td>53 girl</td><td>0.0018</td></tr><tr><td>18 george</td><td>0.0045</td><td>36 stay</td><td>0.0029</td><td>54 
wharf</td><td>0.0018</td></tr></table>" |
| }, |
| "TABREF16": { |
| "text": "Top 54 terms for the Home Again topic with their probabilities.", |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>" |
| } |
| } |
| } |
| } |