| { |
| "paper_id": "W16-0317", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T03:58:57.632643Z" |
| }, |
| "title": "Mental Distress Detection and Triage in Forum Posts: The LT3 CLPsych 2016 Shared Task System", |
| "authors": [ |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Desmet", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "bart.desmet@ugent.be" |
| }, |
| { |
| "first": "Gilles", |
| "middle": [], |
| "last": "Jacobs", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "gillesm.jacobs@ugent.be" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "veronique.hoste@ugent.be" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "This paper describes the contribution of LT3 for the CLPsych 2016 Shared Task on automatic triage of mental health forum posts. Our systems use multiclass Support Vector Machines (SVM), cascaded binary SVMs and ensembles with a rich feature set. The best systems obtain macro-averaged F-scores of 40% on the full task and 80% on the green versus alarming distinction. Multiclass SVMs with all features score best in terms of F-score, whereas feature filtering with bi-normal separation and classifier ensembling are found to improve recall of alarming posts.", |
| "pdf_parse": { |
| "paper_id": "W16-0317", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "This paper describes the contribution of LT3 for the CLPsych 2016 Shared Task on automatic triage of mental health forum posts. Our systems use multiclass Support Vector Machines (SVM), cascaded binary SVMs and ensembles with a rich feature set. The best systems obtain macro-averaged F-scores of 40% on the full task and 80% on the green versus alarming distinction. Multiclass SVMs with all features score best in terms of F-score, whereas feature filtering with bi-normal separation and classifier ensembling are found to improve recall of alarming posts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "The 2016 ACL Workshop on Computational Linguistics and Clinical Psychology included a shared task focusing on triage classification in forum posts from ReachOut.com, an online service for youth mental health issues. The aim is to automatically classify an unseen post as one of four categories indicating the severity of mental distress. ReachOut staff has annotated a corpus of posts with crisis/red/amber/green semaphore labels that indicate how urgently a post needs moderator attention.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The system described in this paper is based on a suicidality classification system intended for Dutch social media (Desmet and Hoste, 2014) . Therefore, we approach the current mental distress triage task from a suicide detection standpoint.", |
| "cite_spans": [ |
| { |
| "start": 115, |
| "end": 139, |
| "text": "(Desmet and Hoste, 2014)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Machine learning and natural language processing have already shown potential in modelling and detecting suicidality in the arts (Stirman and Pennebaker, 2001; Mulholland and Quinn, 2013) and in electronic health records (Haerian et al., 2012) . However, work on computational approaches to the automatic detection of suicidal content in online user-generated media is scarce.", |
| "cite_spans": [ |
| { |
| "start": 129, |
| "end": 159, |
| "text": "(Stirman and Pennebaker, 2001;", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 160, |
| "end": 187, |
| "text": "Mulholland and Quinn, 2013)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 221, |
| "end": 243, |
| "text": "(Haerian et al., 2012)", |
| "ref_id": "BIBREF8" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "One line of research focuses on detecting suicidality in individuals relying on their post history: Huang et al. (2007) aim to identify Myspace.com bloggers at risk of suicide by means of a keyword-based approach using a manually collected dictionary of weighted suicide-related terms. Users were ranked by pattern-matching keywords on their posts. This approach suffered from low precision (35%) and the data does not allow to measure recall, i.e. the number of actually suicidal bloggers that are missing from the results. Similarly, Jashinsky et al. (2014) manually selected keywords by testing search queries linked to various risk factors in a user's Twitter profile. In order to validate this search approach, users posting tweets that match the suicide keywords were grouped by US state for trend analysis. The proportion of at-risk tweeters vs. control-group tweeters were strongly correlated with the actual state suicide rates. While this methodology yields a correct proportion of at-risk users, it is unclear how many of those tweets are false positives and how many at-risk tweets are missing.", |
| "cite_spans": [ |
| { |
| "start": 100, |
| "end": 119, |
| "text": "Huang et al. (2007)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 536, |
| "end": 559, |
| "text": "Jashinsky et al. (2014)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Going beyond a keyword-based approach, Guan et al. (2015) performed linear regression and random forest machine learning for Chinese Weibo.com microbloggers. Suicidality labels were assigned to users in the data set by means of an online psychological evaluation survey. As classification features they took social media profile metadata and psychometric linguistic categories in a user's post history. Results showed that Linear Regression and Random Forest classifiers obtain similar scores with a maximum of 35% F-score (23% precision and 79% recall) being the highest performance.", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 57, |
| "text": "Guan et al. (2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As in the CLPsych 2016 Shared Task, another line of research aims to classify suicidality on the post level, rather than the level of user profiles. Desmet and Hoste (2014) proposed a detection approach using machine learning with a rich feature set on posts in the Dutch social media platform Netlog. Their corpus was manually annotated by suicide intervention experts for suicide relevance, risk and protective factors, source origin, subject of content, and severity. Two binary classification tasks were formulated: a relevance task which aimed to detect posts relevant to suicide, and a threat detection task to detect messages that indicate a severe suicide risk. For the threat detection task, a cascaded setup which first filters irrelevant messages with SVM and then predicts the severity with k-Nearest Neighbors (KNN) performed best: 59.2% F-score (69.5% precision and 51.6% recall). In general, both KNN and SVM outperform Naive Bayes and SVM was more robust to the inclusion of bad features. The system presented in this paper is for the most part an extension and English adaptation of this suicidal post detection pipeline.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 172, |
| "text": "Desmet and Hoste (2014)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We investigated a supervised classification-based approach to the mental distress triage task using SVMs. Below, we describe the data and features that were used, and the way classifiers were built, optimized and combined.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "System Overview", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Labeled data sets: 1/8th of the manually annotated training data was sampled as a held-out development set (n = 118 with at least 4 instances of each class), the remainder (n = 829) was used for training. In the results section, we also report on the held-out test set (n = 241).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Reddit background corpus: In order to perform terminology extraction and topic modelling, we collected domain-relevant text from Reddit.com, a predominantly English social news and bulletin board website. We used the title and body text from all opening posts in mental health and suicide-related boards posted between 2006 and 2014, resulting in an 82.7 million token corpus of over 270,000 posts. The selected boards mainly contain user-generated discussion on mental health, depression, and suicidal thoughts, similar to the ReachOut forums.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Tokenization and preprocessing: All textual data was tokenized and lower-cased to reduce variation. For topic modelling, emoji and punctuation were removed. Pattern (De Smedt and Daelemans, 2012) was used for lemmatization.", |
| "cite_spans": [ |
| { |
| "start": 165, |
| "end": 195, |
| "text": "(De Smedt and Daelemans, 2012)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "We aimed to develop a rich feature set that focused on lexical and semantic information, with fine-grained and more abstract representations of content. Some syntactic and non-linguistic features were also included.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Bag-of-words features: We included binary token unigrams, bigrams and trigrams, along with character trigrams and fourgrams. The latter provide robustness to the spelling variation typically found in social media.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Term lists: Domain-specific multiword terms were derived from the Reddit background corpus, using the TExSIS terminology extraction tool (Macken et al., 2013) . One list was based on suicidespecific boards (/r/SuicideWatch and /r/suicidenotes, 2884 terms), the other included terms only found in other mental health boards (1384 terms).", |
| "cite_spans": [ |
| { |
| "start": 137, |
| "end": 158, |
| "text": "(Macken et al., 2013)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Lexicon features: We computed positive and negative opinion word ratio and overall post sentiment using both the MPQA (Wilson et al., 2005) and Hu and Liu's (2004) opinion lexicons. We added positive, negative and neutral emoji counts based on the BOUNCE emoji sentiment lexicon (K\u00f6kciyan et al., 2013) . We also included the relative frequency of all 64 psychometric categories in the Linguistic Inquiry and Word Count (LIWC) dictionary (Pennebaker et al., 2007) . LIWC features have proven useful in (Stirman and Pennebaker, 2001) for modelling suicidality in literary works. Furthermore, we included diminisher, intensifier, negation, and \"allness\" lexica because of their significance in suicide notes analysis (Osgood and Walker, 1959; Gottschalk and Gleser, 1960; Shapero, 2011) .", |
| "cite_spans": [ |
| { |
| "start": 118, |
| "end": 139, |
| "text": "(Wilson et al., 2005)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 144, |
| "end": 163, |
| "text": "Hu and Liu's (2004)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 279, |
| "end": 302, |
| "text": "(K\u00f6kciyan et al., 2013)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 438, |
| "end": 463, |
| "text": "(Pennebaker et al., 2007)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 502, |
| "end": 532, |
| "text": "(Stirman and Pennebaker, 2001)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 715, |
| "end": 740, |
| "text": "(Osgood and Walker, 1959;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 741, |
| "end": 769, |
| "text": "Gottschalk and Gleser, 1960;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 770, |
| "end": 784, |
| "text": "Shapero, 2011)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Topic models: Using the gensim topic modelling library (\u0158eh\u016f\u0159ek and Sojka, 2010) we trained several LDA (Blei et al., 2003) and LSI (Deerwester et al., 1990) topic models with varying granularity (k = 20, 50, 100, 200). A similarity query was done on each model resulting in two feature groups: k topic similarity scores and the average similarity score. This should allow the classifier to learn which latent topics are relevant for the task, and to what extent the topics align with the ones in the Reddit background corpus. In line with Resnik et al. (2015) , we used topic models to capture latent semantic and syntactic structure in the mental health domain. However, we did not include supervised topic models.", |
| "cite_spans": [ |
| { |
| "start": 104, |
| "end": 123, |
| "text": "(Blei et al., 2003)", |
| "ref_id": null |
| }, |
| { |
| "start": 132, |
| "end": 157, |
| "text": "(Deerwester et al., 1990)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 540, |
| "end": 560, |
| "text": "Resnik et al. (2015)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Syntactic features: Two binary features were implemented indicating whether the imperative mood was used in a post and whether person alternation occurred (i.e. combinations of first and second person pronouns).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Post metadata: We furthermore included several non-linguistic features based on a post's metadata: the time of day a post was made (expressed in three-hour blocks), the board in which it was posted, whether the post includes a subject line or a URL, the role of the author and whether he or she is a moderator, whether the post is the first in a thread, whether there are (moderator) reactions or kudos (i.e. thumbs-up votes).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "When applied to the training data, this resulted in 59 feature groups and 107,852 individual features, the majority of which were bag-of-words features (almost 96%).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Features", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Using SVMs, we tested three different approaches to the problem of correctly assigning the four triage labels to the forum posts. We considered detection of posts with a high level of alarm (crisis or red) to be the priority. Where possible, recall of the priority labels was promoted, since false negatives are most problematic there.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classifiers", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "With multiclass SVMs, one model is used to predict all four labels at once. We hypothesized that distinguishing green from non-green posts would require different information than detecting the more alarming categories. We therefore also tested cascades of three binary SVMs, in which each classifier predicts a higher level of alarm: green vs. rest; red or crisis vs. rest; and crisis vs. rest. The binary results are combined in a way that the label with the highest level of alarm is assigned. This essentially sacrifices some precision on lower-priority classes for better high-priority recall.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classifiers", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Finally, we tested ensembles of various multiclass and binary systems. Predictions were combined with two voting methods: normal majority voting (reported as ensemble-majority), and crisis-priority voting (ensemble-priority) where the most alarming label with at least 2 votes is selected.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Classifiers", |
| "sec_num": "3.3" |
| }, |
| { |
| "text": "Typically, the performance of a machine learning algorithm is not optimal when it is used with all implemented features and with the default algorithm settings. SVMs are known to perform well in the presence of irrelevant features, but dimensionality reduction can still be beneficial for classification accuracy and resource usage. In this section, we describe the methods we tested for feature selection and hyperparameter optimization.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Optimization", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "With feature filtering, a metric is used to determine the informativeness of each feature, given the training data. Yang (1997) found that Information Gain (IG) allows aggressive feature removal with minimal loss in accuracy. Forman (2003) corroborates this finding, but remarks that IG is biased towards the majority class, unlike the Bi-Normal Separation (BNS) metric, which typically achieves better minority class recall. In the results, we compare both filtering methods (-ig and -bns) to no filtering (-nf). IG was applied with a threshold of 0.005 (92-97% reduction), BNS with threshold 3 (79-93% reduction for binary tasks, no multiclass support).", |
| "cite_spans": [ |
| { |
| "start": 116, |
| "end": 127, |
| "text": "Yang (1997)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 226, |
| "end": 239, |
| "text": "Forman (2003)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Optimization", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "We also applied wrapped optimization, where combinations of selected feature groups and hyperparameters are evaluated with SVM using threefold crossvalidation. Exhaustive exploration of all combinations was not possible, so we used genetic algorithms to approximate an optimal solution (Desmet et al., 2013) . In the results section, all reported systems have been optimized for feature group and hyperparameter selection, except for multiclass-unopt (baseline without filtering or optimization) and multiclass-hyper (only hyperparameter optimization, no feature filtering or selection).", |
| "cite_spans": [ |
| { |
| "start": 286, |
| "end": 307, |
| "text": "(Desmet et al., 2013)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Optimization", |
| "sec_num": "3.4" |
| }, |
| { |
| "text": "In Table 4 , we report the four-label classification results of all systems. Most systems perform well in comparison to the shared task top score of 42% macro-averaged F-score, with the multiclass-nf submission scoring highest at 40%. This indicates that the implemented features and approach are within the current state of the art. Arguably, macro-averaged F-score is a harsh metric for this task: it treats the three alarming categories as disjunct, although confusion between those classes can be high and the distinction may not matter much from a usability perspective. Since the test set only contained one crisis instance, failing to detect it effectively limits the ceiling for macroaveraged F-score to 67%. This partly explains the low scores in Table 4 . For comparison, we list Fscore, precision and recall for the green vs. alarming distinction in Table 4 . Alarming posts can be detected with F = 80% and recall up to 89% (ensemble-priority). We tested three classifier configurations, and find that a multiclass approach performs as well as or better than more complex systems. On the development data, ensemble systems perform best, although this is not confirmed by the four-label test results, possibly due to paucity of crisis instances. It appears that ensembles are a sensible choice especially if recall is important. This may be due to the inclusion of the high-recall binary-bns cascade, the low precision of which is offset by ensemble voting. Overall, the aim of improving recall with cascaded and ensemble classifiers seems to have been effective: compared to multiclass systems, they all favour recall over precision more, both on development and test data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 3, |
| "end": 10, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 756, |
| "end": 763, |
| "text": "Table 4", |
| "ref_id": null |
| }, |
| { |
| "start": 861, |
| "end": 868, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results and discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The unoptimized multiclass-unopt acts as a majority baseline that always predicts green, indicating that hyperparameter optimization is essential. Feature selection, on the other hand, does not yield such a clear benefit. On the held-out test data, the nf systems consistently outperform their ig and bns counterparts in terms of F-score. On the development data, feature filtering has a positive effect on recall, particularly when BNS is applied. In summary, the applied feature selection techniques are sometimes successful in removing the bulk of the features without harming performance, although the results suggest that they may remove too many or cause overfitting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Results and discussion", |
| "sec_num": "4" |
| }, |
| { |
| "text": "This paper discussed an SVM-based approach to the CLPsych 2016 shared task. We found that our systems performed well within the state of the art, with macro-averaged F-scores of 40% on the full task, and 80% for the distinction between green and alarming posts, suggesting that confusion between the three alarming classes is high. Multiclass systems performed best, but ensemble classifiers and feature filtering with BNS perform comparably and are better suited when high recall is required.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the organizers for an interesting shared task. This work was carried out in the framework of AMiCA (IWT SBO-project 120007), funded by the Flemish government agency for Innovation by Science and Technology (IWT).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Pattern for python", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "De", |
| "suffix": "" |
| }, |
| { |
| "first": "Smedt", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Walter", |
| "middle": [], |
| "last": "Daelemans", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The Journal of Machine Learning Research", |
| "volume": "13", |
| "issue": "1", |
| "pages": "2063--2067", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom De Smedt and Walter Daelemans. 2012. Pattern for python. The Journal of Machine Learning Research, 13(1):2063-2067.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Indexing by latent semantic analysis", |
| "authors": [ |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Deerwester", |
| "suffix": "" |
| }, |
| { |
| "first": "Susan", |
| "middle": [], |
| "last": "Dumais", |
| "suffix": "" |
| }, |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Furnas", |
| "suffix": "" |
| }, |
| { |
| "first": "Thomas", |
| "middle": [], |
| "last": "Landauer", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Harshman", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Journal of the American society for information science", |
| "volume": "41", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Scott Deerwester, Susan Dumais, George Furnas, Thomas Landauer, and Richard Harshman. 1990. In- dexing by latent semantic analysis. Journal of the American society for information science, 41(6):391.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Recognising suicidal messages in dutch social media", |
| "authors": [ |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Desmet", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "9th International Conference on Language Resources and Evaluation (LREC)", |
| "volume": "", |
| "issue": "", |
| "pages": "830--835", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bart Desmet and V\u00e9ronique Hoste. 2014. Recognis- ing suicidal messages in dutch social media. In 9th International Conference on Language Resources and Evaluation (LREC), pages 830-835.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Gallop documentation. LT3 Technical report", |
| "authors": [ |
| { |
| "first": "Bart", |
| "middle": [], |
| "last": "Desmet", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Verstraeten", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "13--16", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bart Desmet, V\u00e9ronique Hoste, David Verstraeten, and Jan Verhasselt. 2013. Gallop documentation. LT3 Technical report, pages 13-03.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "An extensive empirical study of feature selection metrics for text classification", |
| "authors": [ |
| { |
| "first": "George", |
| "middle": [], |
| "last": "Forman", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "The Journal of machine learning research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1289--1305", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George Forman. 2003. An extensive empirical study of feature selection metrics for text classification. The Journal of machine learning research, 3:1289-1305.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "An analysis of the verbal content of suicide notes", |
| "authors": [ |
| { |
| "first": "Louis", |
| "middle": [], |
| "last": "Gottschalk", |
| "suffix": "" |
| }, |
| { |
| "first": "Goldine", |
| "middle": [], |
| "last": "Gleser", |
| "suffix": "" |
| } |
| ], |
| "year": 1960, |
| "venue": "British Journal of Medical Psychology", |
| "volume": "33", |
| "issue": "3", |
| "pages": "195--204", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Louis Gottschalk and Goldine Gleser. 1960. An analysis of the verbal content of suicide notes. British Journal of Medical Psychology, 33(3):195-204.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Identifying chinese microblog users with high suicide probability using internetbased profile and linguistic features: Classification model", |
| "authors": [ |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Guan", |
| "suffix": "" |
| }, |
| { |
| "first": "Bibo", |
| "middle": [], |
| "last": "Hao", |
| "suffix": "" |
| }, |
| { |
| "first": "Qijin", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "S", |
| "middle": [ |
| "F" |
| ], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "Tingshao", |
| "middle": [], |
| "last": "Yip", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "JMIR mental health", |
| "volume": "2", |
| "issue": "2", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Li Guan, Bibo Hao, Qijin Cheng, Paul SF Yip, and Tingshao Zhu. 2015. Identifying chinese microblog users with high suicide probability using internet- based profile and linguistic features: Classification model. JMIR mental health, 2(2):17.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Methods for identifying suicide or suicidal ideation in EHRs", |
| "authors": [ |
| { |
| "first": "Krystl", |
| "middle": [], |
| "last": "Haerian", |
| "suffix": "" |
| }, |
| { |
| "first": "Hojjat", |
| "middle": [], |
| "last": "Salmasian", |
| "suffix": "" |
| }, |
| { |
| "first": "Carol", |
| "middle": [], |
| "last": "Friedman", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "AMIA Annual Symposium Proceedings", |
| "volume": "2012", |
| "issue": "", |
| "pages": "1244--1253", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Krystl Haerian, Hojjat Salmasian, and Carol Friedman. 2012. Methods for identifying suicide or suicidal ideation in EHRs. In AMIA Annual Symposium Pro- ceedings, volume 2012, pages 1244-1253. American Medical Informatics Association.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Mining and summarizing customer reviews", |
| "authors": [ |
| { |
| "first": "Minqing", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Bing", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Proceedings of the tenth ACM SIGKDD international conference on Knowledge discovery and data mining", |
| "volume": "", |
| "issue": "", |
| "pages": "168--177", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Minqing Hu and Bing Liu. 2004. Mining and summa- rizing customer reviews. In Proceedings of the tenth ACM SIGKDD international conference on Knowl- edge discovery and data mining, pages 168-177. ACM.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Hunting suicide notes in web 2.0-preliminary findings", |
| "authors": [ |
| { |
| "first": "Yen-Pei", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tiong", |
| "middle": [], |
| "last": "Goh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chern Li", |
| "middle": [], |
| "last": "Liew", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Multimedia Workshops, 2007. ISMW'07. Ninth IEEE International Symposium on", |
| "volume": "", |
| "issue": "", |
| "pages": "517--521", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yen-Pei Huang, Tiong Goh, and Chern Li Liew. 2007. Hunting suicide notes in web 2.0-preliminary find- ings. In Multimedia Workshops, 2007. ISMW'07. Ninth IEEE International Symposium on, pages 517- 521. IEEE.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Tracking suicide risk factors through Twitter in the US", |
| "authors": [ |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Jashinsky", |
| "suffix": "" |
| }, |
| { |
| "first": "Scott", |
| "middle": [], |
| "last": "Burton", |
| "suffix": "" |
| }, |
| { |
| "first": "Carl", |
| "middle": [], |
| "last": "Hanson", |
| "suffix": "" |
| }, |
| { |
| "first": "Josh", |
| "middle": [], |
| "last": "West", |
| "suffix": "" |
| }, |
| { |
| "first": "Christophe", |
| "middle": [], |
| "last": "Giraud-Carrier", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Barnes", |
| "suffix": "" |
| }, |
| { |
| "first": "Trenton", |
| "middle": [], |
| "last": "Argyle", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Crisis", |
| "volume": "35", |
| "issue": "1", |
| "pages": "51--59", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jared Jashinsky, Scott Burton, Carl Hanson, Josh West, Christophe Giraud-Carrier, Michael Barnes, and Tren- ton Argyle. 2014. Tracking suicide risk factors through Twitter in the US. Crisis, 35(1):51-59.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "BOUNCE: Sentiment Classification in Twitter using Rich Feature Sets", |
| "authors": [ |
| {
| "first": "Nadin",
| "middle": [],
| "last": "K\u00f6kciyan",
| "suffix": ""
| },
| {
| "first": "Arda",
| "middle": [],
| "last": "\u00c7elebi",
| "suffix": ""
| },
| {
| "first": "Arzucan",
| "middle": [],
| "last": "\u00d6zg\u00fcr",
| "suffix": ""
| },
| {
| "first": "Suzan",
| "middle": [],
| "last": "\u00dcsk\u00fcdarl\u0131",
| "suffix": ""
| }
| ], |
| "year": 2013, |
| "venue": "Second Joint Conference on Lexical and Computational Semantics (*SEM): Proceedings of the Seventh International Workshop on Semantic Evaluation", |
| "volume": "2", |
| "issue": "", |
| "pages": "554--561", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nadin K\u00f6kciyan, Arda \u00c7 elebi, Arzucan\u00d6zg\u00fcr, and Suzan\u00dcsk\u00fcdarl. 2013. BOUNCE: Sentiment Clas- sification in Twitter using Rich Feature Sets. In Sec- ond Joint Conference on Lexical and Computational Semantics (*SEM): Proceedings of the Seventh Inter- national Workshop on Semantic Evaluation (SemEval 2013), volume 2, pages 554-561, Atlanta, Georgia, USA, June. Association for Computational Linguis- tics.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Texsis: bilingual terminology extraction from parallel corpora using chunk-based alignment", |
| "authors": [ |
| { |
| "first": "Lieve", |
| "middle": [], |
| "last": "Macken", |
| "suffix": "" |
| }, |
| { |
| "first": "Els", |
| "middle": [], |
| "last": "Lefever", |
| "suffix": "" |
| }, |
| { |
| "first": "Veronique", |
| "middle": [], |
| "last": "Hoste", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Terminology", |
| "volume": "19", |
| "issue": "1", |
| "pages": "1--30", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lieve Macken, Els Lefever, and Veronique Hoste. 2013. Texsis: bilingual terminology extraction from parallel corpora using chunk-based alignment. Terminology, 19(1):1-30.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Suicidal tendencies: The automatic classification of suicidal and non-suicidal lyricists using nlp", |
| "authors": [ |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Mulholland", |
| "suffix": "" |
| }, |
| { |
| "first": "Joanne", |
| "middle": [], |
| "last": "Quinn", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "IJCNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "680--684", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Matthew Mulholland and Joanne Quinn. 2013. Suici- dal tendencies: The automatic classification of suicidal and non-suicidal lyricists using nlp. In IJCNLP, pages 680-684.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Motivation and language behavior: A content analysis of suicide notes", |
| "authors": [ |
| { |
| "first": "Charles", |
| "middle": [], |
| "last": "Osgood", |
| "suffix": "" |
| }, |
| { |
| "first": "Evelyn", |
| "middle": [], |
| "last": "Walker", |
| "suffix": "" |
| } |
| ], |
| "year": 1959, |
| "venue": "The Journal of Abnormal and Social Psychology", |
| "volume": "59", |
| "issue": "1", |
| "pages": "58",
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Charles Osgood and Evelyn Walker. 1959. Motivation and language behavior: A content analysis of suicide notes. The Journal of Abnormal and Social Psychol- ogy, 59(1):58.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Software Framework for Topic Modelling with Large Corpora", |
| "authors": [ |
| {
| "first": "Radim",
| "middle": [],
| "last": "\u0158eh\u016f\u0159ek",
| "suffix": ""
| },
| {
| "first": "Petr",
| "middle": [],
| "last": "Sojka",
| "suffix": ""
| }
| ],
| "year": 2010,
| "venue": "Proceedings of the LREC 2010 Workshop on New Challenges for NLP Frameworks", |
| "volume": "", |
| "issue": "", |
| "pages": "45--50", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "James Pennebaker, Roger Booth, and Martha Francis. 2007. Liwc2007: Linguistic inquiry and word count. Austin, Texas: liwc. net. Radim\u0158eh\u016f\u0159ek and Petr Sojka. 2010. Software Frame- work for Topic Modelling with Large Corpora. In Pro- ceedings of the LREC 2010 Workshop on New Chal- lenges for NLP Frameworks, pages 45-50, Valletta, Malta, May. ELRA.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "The university of maryland clpsych 2015 shared task system",
| "authors": [ |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Armstrong", |
| "suffix": "" |
| }, |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Claudino", |
| "suffix": "" |
| }, |
| { |
| "first": "Thang", |
| "middle": [], |
| "last": "Nguyen", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "54--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Philip Resnik, William Armstrong, Leonardo Claudino, and Thang Nguyen. 2015. The university of maryland clpsych 2015 shared task system. NAACL HLT 2015, pages 54-60.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The language of suicide notes", |
| "authors": [ |
| { |
| "first": "Jess", |
| "middle": [ |
| "Jann" |
| ], |
| "last": "Shapero", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jess Jann Shapero. 2011. The language of suicide notes. Ph.D. thesis, University of Birmingham.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Word use in the poetry of suicidal and nonsuicidal poets", |
| "authors": [ |
| {
| "first": "Shannon",
| "middle": [
| "Wiltsey"
| ],
| "last": "Stirman",
| "suffix": ""
| },
| {
| "first": "James",
| "middle": [
| "W"
| ],
| "last": "Pennebaker",
| "suffix": ""
| }
| ], |
| "year": 2001, |
| "venue": "Psychosomatic Medicine", |
| "volume": "63", |
| "issue": "4", |
| "pages": "517--522", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shannon Wiltsey Stirman and James W. Pennebaker. 2001. Word use in the poetry of suicidal and nonsuici- dal poets. Psychosomatic Medicine, 63(4):517-522.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Recognizing contextual polarity in phrase-level sentiment analysis", |
| "authors": [ |
| { |
| "first": "Theresa", |
| "middle": [], |
| "last": "Wilson", |
| "suffix": "" |
| }, |
| { |
| "first": "Janyce", |
| "middle": [], |
| "last": "Wiebe", |
| "suffix": "" |
| }, |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Hoffmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "Proceedings of the conference on human language technology and empirical methods in natural language processing", |
| "volume": "", |
| "issue": "", |
| "pages": "347--354", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Theresa Wilson, Janyce Wiebe, and Paul Hoffmann. 2005. Recognizing contextual polarity in phrase-level sentiment analysis. In Proceedings of the conference on human language technology and empirical methods in natural language processing, pages 347-354. Asso- ciation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "A comparative study on feature selection in text categorization", |
| "authors": [ |
| { |
| "first": "Yiming", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jan", |
| "middle": [], |
| "last": "Pedersen", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "ICML", |
| "volume": "97", |
| "issue": "", |
| "pages": "412--420", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yiming Yang and Jan Pedersen. 1997. A comparative study on feature selection in text categorization. In ICML, volume 97, pages 412-420.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": "Results for binary classification: green vs. all other classes (F = F-score, P = precision, R = recall, acc = accuracy)", |
| "uris": null, |
| "num": null, |
| "type_str": "figure" |
| } |
| } |
| } |
| } |