| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:30:52.666846Z" |
| }, |
| "title": "Safeguarding against spurious AI-based predictions: The case of automated verbal memory assessment", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Chandler", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Colorado Boulder", |
| "location": {} |
| }, |
| "email": "chelsea.chandler@colorado.edu" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Colorado Boulder", |
| "location": {} |
| }, |
| "email": "peter.foltz@colorado.edu" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Louisiana State University", |
| "location": {} |
| }, |
| "email": "acohen@lsu.edu" |
| }, |
| { |
| "first": "Terje", |
| "middle": [ |
| "B" |
| ], |
| "last": "Holmlund", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "University of Troms\u00f8", |
| "location": { |
| "country": "Norway" |
| } |
| }, |
| "email": "terje.holmlund@uit.no" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "-The Arctic University of Norway & Norwegian Centre for eHealth Research", |
| "institution": "University of Troms\u00f8", |
| "location": {} |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "A growing amount of psychiatric research incorporates machine learning and natural language processing methods, however findings have yet to be translated into actual clinical decision support systems. Many of these studies are based on relatively small datasets in homogeneous populations, which has the associated risk that the models may not perform adequately on new data in real clinical practice. The nature of serious mental illness is that it is hard to define, hard to capture, and requires frequent monitoring, which leads to imperfect data where attribute and class noise are common. With the goal of an effective AI-mediated clinical decision support system, there must be computational safeguards placed on the models used in order to avoid spurious predictions and thus allow humans to review data in the settings where models are unstable or bound not to generalize. This paper describes two approaches to implementing safeguards: (1) the determination of cases in which models are unstable by means of attribute and class based outlier detection and (2) finding the extent to which models show inductive bias. These safeguards are illustrated in the automated scoring of a story recall task via natural language processing methods. With the integration of human-in-the-loop machine learning in the clinical implementation process, incorporating safeguards such as these into the models will offer patients increased protection from spurious predictions.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "A growing amount of psychiatric research incorporates machine learning and natural language processing methods, however findings have yet to be translated into actual clinical decision support systems. Many of these studies are based on relatively small datasets in homogeneous populations, which has the associated risk that the models may not perform adequately on new data in real clinical practice. The nature of serious mental illness is that it is hard to define, hard to capture, and requires frequent monitoring, which leads to imperfect data where attribute and class noise are common. With the goal of an effective AI-mediated clinical decision support system, there must be computational safeguards placed on the models used in order to avoid spurious predictions and thus allow humans to review data in the settings where models are unstable or bound not to generalize. This paper describes two approaches to implementing safeguards: (1) the determination of cases in which models are unstable by means of attribute and class based outlier detection and (2) finding the extent to which models show inductive bias. These safeguards are illustrated in the automated scoring of a story recall task via natural language processing methods. With the integration of human-in-the-loop machine learning in the clinical implementation process, incorporating safeguards such as these into the models will offer patients increased protection from spurious predictions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Artificial intelligence (AI)-based systems that incorporate language and behavioral data hold promise of increasing sensitivity, equity, and access in the assessment and treatment of mental illness through the use of remote and continuous monitoring via clinical decision support systems. This is due to the fact that the pattern and content of language, as well as additional measures of behavior, such as timing and neuropsychological task scores, provide rich information that can be traced back to an individual's overall mental state.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In order to demonstrate clinical translational value there are numerous risks and factors that are necessary to consider. First, it is important to collect data from large samples of the population across differing ages, cultures, genders, clinical conditions, and stages of disorder. Second, it is critical to create models that are explainable, transparent, and generalizable (Chandler et al., 2020b) in order to nurture trust from both patients and clinicians. And finally -the area that this paper will address -it is necessary to add safeguards to models such that they are capable of flagging cases that show attribute noise (i.e., abnormalities in feature values) or class noise (i.e., erroneous or missing class labels), and of determining the extent to which models will generalize to unseen data. These safeguards will enable a human-in-the-loop system where humans are required to review data abnormalities.", |
| "cite_spans": [ |
| { |
| "start": 378, |
| "end": 402, |
| "text": "(Chandler et al., 2020b)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "AI is used in a wide range of applications within mental health, notably within clinical research settings where data are used to aid in understanding the nature of diagnoses and to improve diagnostic accuracy (for reviews see Shatte et al., 2019; Su et al., 2020; Thieme et al., 2020) , as well as in making complex and potentially lifesaving de-cisions (e.g., in suicidology -for review see Cox et al., 2020) . Acoustic measurements of speech have been analyzed in automated applications for detecting Mild Cognitive Impairment and dementia (Roark et al., 2011; K\u00f6nig et al., 2015) , as well as serious mental illness and depression (McGinnis et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 227, |
| "end": 247, |
| "text": "Shatte et al., 2019;", |
| "ref_id": "BIBREF38" |
| }, |
| { |
| "start": 248, |
| "end": 264, |
| "text": "Su et al., 2020;", |
| "ref_id": "BIBREF39" |
| }, |
| { |
| "start": 265, |
| "end": 285, |
| "text": "Thieme et al., 2020)", |
| "ref_id": "BIBREF40" |
| }, |
| { |
| "start": 393, |
| "end": 410, |
| "text": "Cox et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 543, |
| "end": 563, |
| "text": "(Roark et al., 2011;", |
| "ref_id": "BIBREF35" |
| }, |
| { |
| "start": 564, |
| "end": 583, |
| "text": "K\u00f6nig et al., 2015)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 635, |
| "end": 658, |
| "text": "(McGinnis et al., 2019)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In the domain of techniques that specifically leverage natural language processing (NLP), there are a growing number of reports of using these methods on social media data, notably to data mine publicly shared written reports of mood on platforms such as Twitter and Reddit (Zirikly et al., 2019; Peng et al., 2019; Wu et al., 2012) . There is also a growing interest in using such methods on electronic medical records to assist in the extraction of diagnostic information or to enhance understanding of medical conditions (Ryu et al., 2016; Wang et al., 2012; Metzger et al., 2017) . A broad range of NLP metrics such as incoherence and tangentiality have been used to automatically assess the clinical state of patients with schizophrenia (Elvev\u00e5g et al., 2007) and predict the risk of psychosis onset (Bedi et al., 2015; Rosenstein et al., 2015; Corcoran et al., 2018) . Deep language models and NLP featurebased models have also been shown to differentiate the language of healthy controls from those diagnosed with Mild Cognitive Impairment or dementia (Orimaye et al., 2018; Eyigoz et al., 2020) .", |
| "cite_spans": [ |
| { |
| "start": 274, |
| "end": 296, |
| "text": "(Zirikly et al., 2019;", |
| "ref_id": "BIBREF48" |
| }, |
| { |
| "start": 297, |
| "end": 315, |
| "text": "Peng et al., 2019;", |
| "ref_id": "BIBREF34" |
| }, |
| { |
| "start": 316, |
| "end": 332, |
| "text": "Wu et al., 2012)", |
| "ref_id": "BIBREF45" |
| }, |
| { |
| "start": 524, |
| "end": 542, |
| "text": "(Ryu et al., 2016;", |
| "ref_id": "BIBREF37" |
| }, |
| { |
| "start": 543, |
| "end": 561, |
| "text": "Wang et al., 2012;", |
| "ref_id": "BIBREF42" |
| }, |
| { |
| "start": 562, |
| "end": 583, |
| "text": "Metzger et al., 2017)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 742, |
| "end": 764, |
| "text": "(Elvev\u00e5g et al., 2007)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 805, |
| "end": 824, |
| "text": "(Bedi et al., 2015;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 825, |
| "end": 849, |
| "text": "Rosenstein et al., 2015;", |
| "ref_id": "BIBREF36" |
| }, |
| { |
| "start": 850, |
| "end": 872, |
| "text": "Corcoran et al., 2018)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 1059, |
| "end": 1081, |
| "text": "(Orimaye et al., 2018;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1082, |
| "end": 1102, |
| "text": "Eyigoz et al., 2020)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "There is clear evidence that the clinical data used in AI-based research applications hold predictive power in detection and diagnosis, prognosis, support and treatment, and as a second opinion measurement for illness severity, but it is unclear about the degree to which these models will be stable on new data. Many psychiatric studies that harness AI tend to do so on relatively small datasets (i.e., 10-100 participants) in fairly homogeneous populations (e.g., the WEIRD (Western, Educated, Industrialized, Rich, and Democratic) phenomenon -Henrich et al., 2010; and the predominance of male participants in psychiatric research studies - Longenecker et al., 2010) . These shortcomings may lead to insufficient accuracy on unseen data retrieved from different experimental settings (e.g., in a lab vs. remote; prompted free speech vs. natural; as a component of a larger testing battery vs. on its own), populations (e.g., southern vs. northern; different English speaking countries; monolingual vs. multilingual participants), and clinical states (e.g., hallucinating vs. not hallucinating). One must keep in mind that in small datasets, spurious features may not be generalizable to a larger population, especially if they are not of any apparent clinical relevance (Chandler et al., 2020b; Whelan and Garavan, 2014) . While these research experiments are noteworthy, they must be re-evaluated on larger and more diverse sets of participants to test for robustness and generalizability.", |
| "cite_spans": [ |
| { |
| "start": 644, |
| "end": 669, |
| "text": "Longenecker et al., 2010)", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 1273, |
| "end": 1297, |
| "text": "(Chandler et al., 2020b;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 1298, |
| "end": 1323, |
| "text": "Whelan and Garavan, 2014)", |
| "ref_id": "BIBREF44" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Incorrect or ill-advised decisions and predictions in psychiatry can be dangerous and life altering for patients, and the difficulty in decision making is further confounded by the very short time frame in which changes in mental state occur and the associated clinical decisions must be made. Thus, we must build systems that have the ability to instantaneously flag data abnormalities -both in the research phase and when translated into real clinical use -and pass these cases on for human review. Furthermore, rather than selecting a preferred machine learning model based on metrics such as accuracy, sensitivity, or correlation as is common in AI and NLP applications, we must seek to understand the underlying mechanisms and the context in which they will be used (Ethayarajh and Jurafsky, 2020; Hand, 2006) .", |
| "cite_spans": [ |
| { |
| "start": 771, |
| "end": 802, |
| "text": "(Ethayarajh and Jurafsky, 2020;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 803, |
| "end": 814, |
| "text": "Hand, 2006)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Researchers in machine learning have proposed assessing models with stability metrics which define ways to quantify and compare the stability of results rather than simply focusing on the aforementioned metrics (Turney, 1995; Lange et al., 2002) . Specifically, Zhu and Wu (2004) differentiated data-based noise and outliers into class noise and attribute noise, and advocated for analyzing their effects on machine learning models separately. Uncertainty estimation, as well as in- and out-of-distribution error detection has been critically important in the use of AI in a wide range of applications such as self driving cars (Mohseni et al., 2020) , general medicine (Kompa et al., 2021 ), education (Foltz et al., 2013 , and in many other domains.", |
| "cite_spans": [ |
| { |
| "start": 211, |
| "end": 225, |
| "text": "(Turney, 1995;", |
| "ref_id": "BIBREF41" |
| }, |
| { |
| "start": 226, |
| "end": 245, |
| "text": "Lange et al., 2002)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 262, |
| "end": 279, |
| "text": "Zhu and Wu (2004)", |
| "ref_id": "BIBREF47" |
| }, |
| { |
| "start": 628, |
| "end": 650, |
| "text": "(Mohseni et al., 2020)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 670, |
| "end": 689, |
| "text": "(Kompa et al., 2021", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 690, |
| "end": 722, |
| "text": "), education (Foltz et al., 2013", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper we illustrate an example of NLP and machine learning methods applied to the automated scoring of a story recall task, a core component of psychiatric neuropsychological assessments. We focus on two approaches to safeguarding such a model: 1) the detection of attribute and class noise that can affect the predictions of a model and 2) the evaluation of the extent to which the model may or may not generalize to unseen data. We first applied methods to determine where noise exists with an outlier detection algorithm and data visualization. For the issue of model generalizability, we studied the effect of dataset size on the results, and we illustrate how such results change as we randomly remove portions of our training data. Additionally, we show the results of this particular story recall model applied to a new collection of data. We advocate that these computational safeguards, which have major implications in regard to their use in human-in-the-loop clinical support systems, must be placed on each machine learning model that is developed to automate or assist in clinical assessments.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The data in the present work were collected from a mobile phone application (the delta Mental Status Examination, henceforth called dMSE) designed to assess patient state via various neuropsychological assessments, with many relying on patient language (Chandler et al., 2020a; Holmlund et al., 2020) . A total of 12 behavioral assessment tasks were employed to specifically assess the language, cognition, motor skill, and mental state of patients -areas where assessment is critical in those with serious mental illness -and integrated into the dMSE smart device application. The behavioral assessment tasks were similar to standardly employed neuropsychological tests (for an overview of neuropsychological testing, see Lezak et al., 2012) , but adapted such that they could be remotely and frequently selfadministered with variations of each task presented over time (Chandler et al., 2020a; . As an automated measurement tool that can be used remotely, frequently and self-administered, this approach has the potential to enable greater access to mental health services. It permits patients to be monitored longitudinally outside of clinical institutions and can alert clinicians to critical changes in mental states, thereby providing greater availability to assistance, regardless of age, gender, ethnicity, location, or socioeconomic status.", |
| "cite_spans": [ |
| { |
| "start": 253, |
| "end": 277, |
| "text": "(Chandler et al., 2020a;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 278, |
| "end": 300, |
| "text": "Holmlund et al., 2020)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 723, |
| "end": 742, |
| "text": "Lezak et al., 2012)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 871, |
| "end": 895, |
| "text": "(Chandler et al., 2020a;", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental overview 2.1 The dMSE", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The data comprised N = 25 patients and N = 79 presumed healthy undergraduate students from Louisiana State University who all provided informed written consent. These participants completed N = 118 and N = 226 sessions (i.e., one completion of the full battery of tasks in a single use of the application) with the dMSE, with an average of 4.72 (stdev = 1.14) and 2.90 (stdev = 0.90) per person, respectively. The patients were severely mentally ill outpatients on the psychosis spectrum. Two-thirds of the patients met the criteria for schizophrenia (N = 16), and the remaining met the criteria for major depressive disorder (N = 8) and bipolar disorder (N = 1). This study was approved by the Louisiana State University Institutional Review Board (#3618) and participants provided their informed written consent before participation. The application was designed specifically for use in remote settings, such as rural Louisiana and Northern Norway, where access to in-person clinical support can be quite difficult.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental overview 2.1 The dMSE", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The machine learning model we use to illustrate safeguarding techniques automatically scored a variant of the immediate and delayed Logical Memory story recall task (of the Wechsler Memory test; Wechsler, 1997) that was employed in the dMSE. The story recall task is critical in neuropsychological assessment as memory function is of core interest in the evaluation of many neurodevelopmental, neurodegenerative and neuropsychiatric conditions, as well as in brain injuries (Baddeley and Wilson, 2002) . Further, it is of enormous interest in mental illness research because of its value as a critical endophenotype (Cirillo and Seidman, 2003) , as well as the fact that the process of recollecting has similarities to what is required by patients when their medical history is taken.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 210, |
| "text": "Wechsler, 1997)", |
| "ref_id": "BIBREF43" |
| }, |
| { |
| "start": 474, |
| "end": 501, |
| "text": "(Baddeley and Wilson, 2002)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 616, |
| "end": 643, |
| "text": "(Cirillo and Seidman, 2003)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The story recall regression model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "In our version of this task, a participant listens to a short story of on average 74 words (min = 62, max = 87) and then is asked to retell it both immediately and after a delay of 30 minutes in as much detail as possible, thus following the same format as the traditional Wechsler version. Stories were either narrative or instructional. The narrative stories contain two characters, a setting, an action that caused a problem, and a resolution. The instructional passages described how to accomplish some sort of goal, such as how to assemble a skateboard or how to clean a fish bowl. This dMSE story recall task was developed such that there could be many different versions capable of being scored with automated NLP methods (e.g., Chandler et al., 2021 , Holmlund et al., 2020 rather than traditional rubric-based methods.", |
| "cite_spans": [ |
| { |
| "start": 736, |
| "end": 757, |
| "text": "Chandler et al., 2021", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 758, |
| "end": 781, |
| "text": ", Holmlund et al., 2020", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The story recall regression model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "Three trained human raters with clinical experience assigned scores to the recall transcriptions based on the quality and amount of details (e.g., characters, events, dates, descriptors, feelings) recalled. The rubric was on a scale from 1 to 6, with 1 indicating no details were recalled, and 6 indicating all major and almost all minor details were recalled. Each participant completed one immediate narrative recall, one immediate instructional recall, and one delayed narrative recall per session. After the removal of responses with no words, the dataset contained N = 846 samples (N = 285 immediate narrative, N = 285 immediate instructional, and N = 276 delayed narrative).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The story recall regression model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "A ridge regression model was created to predict the rating a trained professional would assign to a story recall. The model was trained on (1) the number of word types (i.e., unique words) in the recall, (2) the number of common word types between the original story and the recall, and (3) the BERTScore (Zhang et al., 2020) between the original story and the retell (the model was created in the same manner as that of Chandler et al., 2019 besides a change in the last feature from the word mover's distance to BERTScore). BERTScore is a similarity metric that was created to produce a score of how close a machine generated translation is to the gold standard(s) of some piece of text. Specifically, it creates a matrix of BERT (Devlin et al., 2019) cosine distances between words in one text to words in another. Alignment between words in both texts is produced greedily with the maximum cosine distance for each word in one text to another in the reference. All distances are averaged and inverse document frequency weightings are optionally incorporated. The ridge regression model was trained and tested using 10-fold cross-validation and controlled such that sessions from the same participant did not occur simultaneously in both the train and test sets. The rating prediction model resulted in an average Pearson r correlation with human ratings of r = 0.91. These results indicate that we can automatically derive a range of semantic and surface level features from spoken recalls, and that these features can be harnessed to accurately predict the ratings of expert humans.", |
| "cite_spans": [ |
| { |
| "start": 305, |
| "end": 325, |
| "text": "(Zhang et al., 2020)", |
| "ref_id": "BIBREF46" |
| }, |
| { |
| "start": 421, |
| "end": 442, |
| "text": "Chandler et al., 2019", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 732, |
| "end": 753, |
| "text": "(Devlin et al., 2019)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "The story recall regression model", |
| "sec_num": "2.2" |
| }, |
| { |
| "text": "We begin our analysis of computational safeguards by discussing the determination of attribute and class noise in the context of model stability. Model stability analysis allows us to establish how unusual variations in input data will affect the output of the model. Put simply, we wish to find where in the feature space models may be the most unstable. We illustrate an approach that will allow researchers to detect attribute and class noise in data that could be due to construct-irrelevance or errors in assumptions. Specifically, attribute noise is where values of individual attributes do not make sense; whether they are erroneous or missing. Class noise is where a label does not make sense given the distribution of the features for other data with the same label; whether it is mislabeled or contradictory. In order to make the notions of attribute noise and class noise concrete, see Table 1 for a hypothetical distribution of the story recall data with an emphasis on what could potentially constitute both types of noise. In this section, we explore instability that could be due to outliers in training data, disagreement between features, or incorrect assumptions of the data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 897, |
| "end": 904, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Our first outlier analysis was based on researchstage settings where we have access to both attribute values and class labels. While this exact approach may not always be feasible in the eventual clinical application stage (since there are not always ground truth class labels available), the approach itself can nonetheless be harnessed in Figure 1 : Distribution of the first dimension of Principal Component Analysis (PCA) of the 3 features of the story recall data separated by rating. The darker colored peak on the left represents the lowest rating (1 point) which increases by one point per peak to the lighter colored peak on the right hand side (6 points). Outliers found with the Isolation Forest algorithm are shown with a cross and the color of the cross represents the human rating given to that example. the same manner but with the omission of ratings, classes, or labels. Here, we discovered outliers using the Isolation Forest algorithm (Liu et al., 2018) . Most outlier detection algorithms first find the normal region of data and subsequently define anything outside of this defined region to be an outlier. The Isolation Forest algorithm, on the other hand, discovers minority data points that have attribute values that differ from those of the usual instances. Specifically, the algorithm isolates examples by selecting an attribute at random and then selecting a random split value between the maximum and minimum values of the selected feature. Anomalous examples will have shorter paths from the root to the leaves in their isolation trees than the normal examples since they need fewer partitions to be isolated. This algorithm is well-suited for high dimensional datasets and has proven to be an effective way of detecting outliers and anomalies (Ding and Fei, 2013) . Furthermore, it works especially well for behavioral data as \"normal\" regions tend to be more variable than in other domains.", |
| "cite_spans": [ |
| { |
| "start": 954, |
| "end": 972, |
| "text": "(Liu et al., 2018)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1774, |
| "end": 1794, |
| "text": "(Ding and Fei, 2013)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 341, |
| "end": 349, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "The current outlier analysis was specifically based on the number of types (i.e., unique words), the number of common types between the original story and the recall, the BERTScore between the original story and the recall, and the human rating given to the recall. Figure 1 shows the results of applying the Isolation Forest algorithm to the story recall data. It is shown that 18 outliers were detected. Such instances would be flagged for human review, where researchers can determine if attribute or class noise is present and either fix the erroneous values or exclude them from the modeling in the case that the examples are entirely invalid. When the approach is used in clinical settings to flag attribute noise, clinicians can review the raw data and make determinations for themselves rather than relying on a machine prediction.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 266, |
| "end": 274, |
| "text": "Figure 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Out of the 18 examples flagged by the Isolation Forest algorithm, 9 were determined to be invalid responses (i.e., participants stating that they simply do not remember or responses that are insufficient for data analysis) and 9 were valid responses with either sparse amounts of language or large amounts of language but poor performance. The average absolute error on the outliers was 1.34 (stdev = 0.80); the valid response outliers generated a higher absolute error (average = 1.63, stdev = 0.91) than the invalid response outliers (average = 1.05, stdev = 0.63). The performance of the model on outlier data is far lower than the model's overall performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As the contamination threshold of the Isolation Forest algorithm is increased (i.e., the criteria for an outlier is relaxed), additional responses are chosen that mirror the behavior of these 18. This is a parameter that would need to be tuned such that all true outliers are detected yet it does not extend into the normal data range. Furthermore, this parameter will need to be learned by investigating the true distribution of these phenomena and will depend on the application. Interestingly, the model performance did not change with the removal of these outliers. As approximately 2% of the data was flagged in this experiment, the model behaved indifferently to their exclusion. The exclusion of extremes (which help the performance of the model) combined with noise (which harm the performance of the model) potentially balanced out the effects of both. This Isolation Forest analysis can be performed on the same data without ratings in the eventual clinical stage to find attribute noise and extremes. We also present an analysis of features alone that can be done in any stage of the modeling process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "A basic noise detection approach that can be used at any stage of the modeling process is to simply find the examples with low attribute agreement (assuming that the attributes are collinear). Figure 2 depicts the distribution of two of the most predictive features of the story recall rating prediction model (the number of common word types and the BERTScore between the original story and the recall). There is a steady agreement between the two features, with some outliers (marked with crosses) outside of the diagonal where the features do not agree. The color of the circles represents how far off the model rating was from the human rating. Two examples with exceptionally high error (~2.5-3.0) are identified in red. The bottom-most red example is a response with a mixture of correct and incorrect (random) details, as well as incoherent language. The top-most red example is a response with a high BERTScore even though only a recitation of the title of the story was spoken. This high disagreement between features in turn uncovered a faulty feature score potentially due to flawed weighting parameters in the BERTScore model. We have shown that examples located off of the diagonal in plots such as these should be passed on for human evaluation as disagreement in two objective collinear attributes of story recall may raise concern.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 193, |
| "end": 202, |
| "text": "Figure 2", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Finding these outliers is critical because if a model has not been exposed to certain combinations of features or labels in its training set, then we cannot assume that it will produce accurate predictions in such settings. Outliers are important to detect both in the research stage in order to update or exclude certain examples from affecting the model in a negative manner and in the clinical setting so that spurious decisions are not made on abnormal data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of attribute and class noise", |
| "sec_num": "3" |
| }, |
| { |
| "text": "As previously stated, one of the most critical safeguards to spurious AI-based predictions is using large, diverse, and representative data (Cirillo et al., 2020) , but this is not always possible. When using human behavioral data in machine learning algorithms, researchers inadvertently make the assumption that there is one canonical representation of specific groups of humans (i.e., those with serious mental illness), but this is simply not true. Those with psychiatric disorders exhibit extremely diverse symptoms and behaviors. Human behavior displays patterns indicative of a chaotic system (Paulus and Braff, 2003; Guess and Sailor, 1993) , which holds true for behavior within one person as well as behavior within a group. To approach the topic of generalizable data, we first explored whether choosing different subsets within a training dataset would affect the output of the resulting model and whether there are spurious results when using smaller subsets.", |
| "cite_spans": [ |
| { |
| "start": 140, |
| "end": 162, |
| "text": "(Cirillo et al., 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 600, |
| "end": 624, |
| "text": "(Paulus and Braff, 2003;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 625, |
| "end": 648, |
| "text": "Guess and Sailor, 1993)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of model generalizability", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The story recall regression model was trained on N = 846 samples, a large size relative to clinical experiments in the mental health domain. We used stratified sampling to create smaller subsets of the data that retain the proportions of each rating The change in the average and standard deviation (stdev) of the correlations between (1) the human rating and the model rating, (2) the human rating and BERTScore, and (3) the human rating and common word types as smaller subsets of the data are randomly chosen in a stratified manner for training and testing. The first column displays the percent of data and the number of data points used in each data reduction setting.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of model generalizability", |
| "sec_num": "4" |
| }, |
| { |
| "text": "and tested how the model behaved on these smaller subsets. Table 2 depicts the changing accuracy of the model and correlations of features to human ratings when these smaller subsets of the data were used for training and testing. We found the average correlation over a 10-fold cross-validation of the sampled subsets controlled such that sessions from the same participant did not occur simultaneously in both the training and testing sets. So as to show the low effect of the randomness involved in sampling smaller subsets, we report these metrics after 10 random re-samplings. It is shown that this regression model is stable when smaller subsets of the training data are used. Had the model shown significant drops in accuracy when restricting the dataset size, it could be concluded that the model was unstable or had overfit the training data.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 59, |
| "end": 66, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Effects of model generalizability", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Since experiments based on subsets of data retrieved from the same experimental population and setting do not necessarily show the true extent of model generalizability, we also performed transfer tests of the story recall model. Specifically, a second dataset was collected from inpatients at a substance abuse program in Louisiana (N = 99), most of whom suffered from co-occurring mood, psychotic, anxiety and personality spectrum disorders, as well as an additional collection from presumed healthy undergraduates at Louisiana State University (N = 124). Together, the inpatients and the presumed healthy undergraduates completed N = 1254 story recalls. A ridge regression model with the same NLP features as previously reported was trained on the initial dataset and tested on the new dataset, as well as vice versa. The first experiment resulted in a Pearson r correlation of 0.86 and the reverse an r of 0.84. Here, we conclude that the story recall regression model will generalize to differing clinical populations as well as illness severities. The same may not hold true for differing cultural populations as language differences may prove to be a confounding variable in transferring such a model. We thus advocate testing models on each new population prior to implementation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of model generalizability", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Neuropsychological task scoring is a much more objective application area than other modeling applications in this field in which less is known and gold standard labels are often disagreed upon (e.g., disease detection, mental state tracking, and so on). Thus, generalizability is much more critical to test in these other applications and will potentially not yield such robust conclusions. Nonetheless, the understanding of when a model will yield accurate output and when it will not is an extremely important endeavor. Finding representative data is of the utmost importance in machine learning. In some cases, such as the story recall regression model, it is best to get as much data from as many people as possible. In other cases, especially when dealing with extreme diversity between individuals or subsets of individuals, it may be best to only use data that behaves in a similar fashion to the example currently being tested.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Effects of model generalizability", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Mental health is extremely dynamic as it can change on the scale of seconds, minutes, hours, or days, and language offers an objective and potentially unobtrusive way to assay such changes. Mental state in some conditions can change quickly with fatal consequences (e.g., suicide attempts) and more frequent monitoring of language and behavioral data, combined with machine learning methods, has the potential to offer clinicians unprecedented support in tracking patient state. Language can be harnessed for many applications as it offers a quantitative conceptualization of a person's underlying thought processes and mental health. Tracking such phenomena is extremely important yet increasingly complex, and as such there is a need for greater reliance on model outputs in this field.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In experiments involving NLP methods, it is common to deal with high dimensionality from features like word embeddings, parser outputs, and so on, which makes interpretation and understanding of models difficult. Features often go beyond normal distributions and as such there tends to be high variability in data distributions. Thus, it is especially important to create methods and tools that allow us to better understand the feature space and determine whether attributes or classes may violate assumptions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "An eventual goal of this line of work is to have a human-in-the-loop system where models analyze streams of high dimensional patient data and produce predictions of mental state and well-being. In the research stage of this implementation, real data must be analyzed to determine what normal distributions of attributes and classes appear to be. Aberrant instances of patient data can be flagged and reviewed by researchers to either update or exclude from models. Researchers must also test their models' generalizability by collecting additional samples or performing validation techniques to verify performance on unseen data. This process will allow for models to be based on the most accurate and representative data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In the eventual clinical decision support system implementation, models must be realized such that attribute outliers are not predicted on, but rather the raw data is passed to a clinician to make a judgment. If the outlier is due to faulty feature values, clinicians can update these values or they can create their own labels and update the system such that future similar cases would not necessarily need to be verified by a human. In such a situation, there is a \"best of both worlds\" where models can execute the tasks that they are best at (high dimensional data analysis) and humans can execute the tasks that they are best at (handling anomalies and interpreting patient data).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "For NLP and machine learning methods to be adopted in current research experiments as well as in eventual clinical practice, they require critical peer evaluation. What is needed is transparency in terms of data collection, validation, reproducibility, and clinical agreement in the association of language features to underlying illness. This paper showcases how essential it is that clinicians are involved in all stages of development. As such, it is a large step towards bringing more ethics and transparency into AI-based studies in mental health. Ethics review boards must demand this type of transparency and fairness in the creation of models so that systems that harness machine learning can be implemented in real clinical practice with low risk. Some discussion of this path forward has been brought to light by Friesen et al. (2021) who reported on IRBs as a means of ethics oversight in health research that harnesses AI.", |
| "cite_spans": [ |
| { |
| "start": 823, |
| "end": 844, |
| "text": "Friesen et al. (2021)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Discussion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "This paper illustrates the importance of understanding the assumptions and distributions that underlie training data and the algorithms used, as well as the need to flag data that have characteristics that violate these assumptions. Not only is this knowledge important, but so too is having the tools to do this. We found model instabilities in a story recall regression model with the use of outlier detection algorithms and error analyses with respect to varying input. We advocate that approaches such as these be incorporated into machine learning and NLP-based clinical research and implementation. With the complexities inherent to models based on many features, high numbers of parameters, highly variable human behavioral data, and extremely high (and potentially fatal) stakes for mistakes, it is critical to establish methods beyond model designer intuition in assuring robustness and that predictions cannot be made on out of range data or data that lies in areas of instability. It should now be obvious that high predictive power on a relatively small dataset does not entail clinical relevance or generalizability, and that it is essential to use larger data sets, have more data collection outside of controlled settings, incorporate modeling safeguards, and use human-in-the-loop methodologies at all steps of the process.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "6" |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "Parts of this project were funded by grant 231395 from the Research Council of Norway awarded to Brita Elvev\u00e5g.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Prose recall and amnesia: implications for the structure of working memory", |
| "authors": [ |
| { |
| "first": "Alan", |
| "middle": [], |
| "last": "Baddeley", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [ |
| "A" |
| ], |
| "last": "Wilson", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Neuropsychologia", |
| "volume": "40", |
| "issue": "", |
| "pages": "1737--1743", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0028-3932(01)00146-4" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alan Baddeley and Barbara A. Wilson. 2002. Prose recall and amnesia: implications for the structure of working memory. Neuropsychologia, 40:1737- 1743.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Automated analysis of free speech predicts psychosis onset in high-risk youths", |
| "authors": [ |
| { |
| "first": "Gillinder", |
| "middle": [], |
| "last": "Bedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Facundo", |
| "middle": [], |
| "last": "Carrillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillermo", |
| "middle": [ |
| "A" |
| ], |
| "last": "Cecchi", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [ |
| "Fern\u00e1ndez" |
| ], |
| "last": "Slezak", |
| "suffix": "" |
| }, |
| { |
| "first": "Mariano", |
| "middle": [], |
| "last": "Sigman", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Nat\u00e1lia", |
| "suffix": "" |
| }, |
| { |
| "first": "Sidarta", |
| "middle": [], |
| "last": "Mota", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "C" |
| ], |
| "last": "Ribeiro", |
| "suffix": "" |
| }, |
| { |
| "first": "Mauro", |
| "middle": [], |
| "last": "Javitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Cheryl", |
| "middle": [ |
| "M" |
| ], |
| "last": "Copelli", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Corcoran", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/npjschz.2015.30" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Gillinder Bedi, Facundo Carrillo, Guillermo A. Cec- chi, Diego Fern\u00e1ndez Slezak, Mariano Sigman, Nat\u00e1lia B. Mota, Sidarta Ribeiro, Daniel C. Javitt, Mauro Copelli, and Cheryl M. Corcoran. 2015. Au- tomated analysis of free speech predicts psychosis onset in high-risk youths. npj Schizophrenia, 1.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Overcoming the bottleneck in traditional assessments of verbal memory: Modeling human ratings and classifying clinical group membership", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Chandler", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "C" |
| ], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [ |
| "P" |
| ], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Terje", |
| "middle": [ |
| "B" |
| ], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology", |
| "volume": "", |
| "issue": "", |
| "pages": "137--147", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3016" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chelsea Chandler, Peter W. Foltz, Jian Cheng, Jared C. Bernstein, Elizabeth P. Rosenfeld, Alex S. Cohen, Terje B. Holmlund, and Brita Elvev\u00e5g. 2019. Over- coming the bottleneck in traditional assessments of verbal memory: Modeling human ratings and clas- sifying clinical group membership. In Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology, pages 137-147.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Machine learning for ambulatory applications of neuropsychological testing", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Chandler", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Terje", |
| "middle": [ |
| "B" |
| ], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "C" |
| ], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [ |
| "P" |
| ], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Intelligence-Based Medicine", |
| "volume": "", |
| "issue": "", |
| "pages": "1--2", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.ibmed.2020.100006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chelsea Chandler, Peter W. Foltz, Alex S. Cohen, Terje B. Holmlund, Jian Cheng, Jared C. Bern- stein, Elizabeth P. Rosenfeld, and Brita Elvev\u00e5g. 2020a. Machine learning for ambulatory applica- tions of neuropsychological testing. Intelligence- Based Medicine, 1-2.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Using machine learning in psychiatry: The need to establish a framework that nurtures trustworthiness", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Chandler", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Schizophrenia Bulletin", |
| "volume": "46", |
| "issue": "", |
| "pages": "11--14", |
| "other_ids": { |
| "DOI": [ |
| "10.1093/schbul/sbz105" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chelsea Chandler, Peter W. Foltz, and Brita Elvev\u00e5g. 2020b. Using machine learning in psychiatry: The need to establish a framework that nurtures trustwor- thiness. Schizophrenia Bulletin, 46:11-14.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Extending the usefulness of the verbal memory test: The promise of machine learning", |
| "authors": [ |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Chandler", |
| "suffix": "" |
| }, |
| { |
| "first": "Terje", |
| "middle": [ |
| "B" |
| ], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Psychiatry Research", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.psychres.2021.113743" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chelsea Chandler, Terje B. Holmlund, Peter W. Foltz, Alex S. Cohen, and Brita Elvev\u00e5g. 2021. Extend- ing the usefulness of the verbal memory test: The promise of machine learning. Psychiatry Research, 297.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Mar\u00eda Jos\u00e9 Rementeria, Antonella Santuccione Chadha, and Nikolaos Mavridis. 2020. Sex and gender differences and biases in artificial intelligence for biomedicine and healthcare", |
| "authors": [ |
| { |
| "first": "Davide", |
| "middle": [], |
| "last": "Cirillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Silvina", |
| "middle": [], |
| "last": "Catuara-Solarz", |
| "suffix": "" |
| }, |
| { |
| "first": "Czuee", |
| "middle": [], |
| "last": "Morey", |
| "suffix": "" |
| }, |
| { |
| "first": "Emre", |
| "middle": [], |
| "last": "Guney", |
| "suffix": "" |
| }, |
| { |
| "first": "Laia", |
| "middle": [], |
| "last": "Subirats", |
| "suffix": "" |
| }, |
| { |
| "first": "Simona", |
| "middle": [], |
| "last": "Mellino", |
| "suffix": "" |
| }, |
| { |
| "first": "Annalisa", |
| "middle": [], |
| "last": "Gigante", |
| "suffix": "" |
| }, |
| { |
| "first": "Alfonso", |
| "middle": [], |
| "last": "Valencia", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/s41746-020-0288-5" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Davide Cirillo, Silvina Catuara-Solarz, Czuee Morey, Emre Guney, Laia Subirats, Simona Mellino, An- nalisa Gigante, Alfonso Valencia, Mar\u00eda Jos\u00e9 Re- menteria, Antonella Santuccione Chadha, and Niko- laos Mavridis. 2020. Sex and gender differences and biases in artificial intelligence for biomedicine and healthcare. npj Digital Medicine, 3.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Verbal declarative memory dysfunction in schizophrenia: from clinical assessment to genetics and brain mechanisms", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Michael", |
| "suffix": "" |
| }, |
| { |
| "first": "Larry", |
| "middle": [ |
| "J" |
| ], |
| "last": "Cirillo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Seidman", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Neuropsychol. Rev", |
| "volume": "13", |
| "issue": "", |
| "pages": "43--77", |
| "other_ids": { |
| "DOI": [ |
| "10.1023/a:1023870821631" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Michael A. Cirillo and Larry J. Seidman. 2003. Ver- bal declarative memory dysfunction in schizophre- nia: from clinical assessment to genetics and brain mechanisms. Neuropsychol. Rev, 13:43-77.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Ambulatory vocal acoustics, temporal dynamics and serious mental illness", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Elana", |
| "middle": [ |
| "K" |
| ], |
| "last": "Fedechko", |
| "suffix": "" |
| }, |
| { |
| "first": "Thanh", |
| "middle": [ |
| "P" |
| ], |
| "last": "Schwartz", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Le", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Terje", |
| "middle": [ |
| "B" |
| ], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Journal of Abnormal Psychology", |
| "volume": "128", |
| "issue": "", |
| "pages": "97--105", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/abn0000397" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex S. Cohen, Taylor L Fedechko, Elana K. Schwartz, Thanh P. Le, Peter W. Foltz, Jared Bernstein, Jian Cheng, Terje B. Holmlund, and Brita Elvev\u00e5g. 2019. Ambulatory vocal acoustics, temporal dynamics and serious mental illness. Journal of Abnormal Psy- chology, 128:97-105.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Prediction of psychosis across protocols and risk cohorts using automated language analysis", |
| "authors": [ |
| { |
| "first": "Cheryl", |
| "middle": [ |
| "M" |
| ], |
| "last": "Corcoran", |
| "suffix": "" |
| }, |
| { |
| "first": "Facundo", |
| "middle": [], |
| "last": "Carrillo", |
| "suffix": "" |
| }, |
| { |
| "first": "Diego", |
| "middle": [], |
| "last": "Fern\u00e1ndez-Slezak", |
| "suffix": "" |
| }, |
| { |
| "first": "Gillinder", |
| "middle": [], |
| "last": "Bedi", |
| "suffix": "" |
| }, |
| { |
| "first": "Casimir", |
| "middle": [], |
| "last": "Klim", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "C" |
| ], |
| "last": "Javitt", |
| "suffix": "" |
| }, |
| { |
| "first": "Carrie", |
| "middle": [ |
| "E" |
| ], |
| "last": "Bearden", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillermo", |
| "middle": [ |
| "A" |
| ], |
| "last": "Cecchi", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "World Psychiatry", |
| "volume": "17", |
| "issue": "", |
| "pages": "67--75", |
| "other_ids": { |
| "DOI": [ |
| "10.1002/wps.20491" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Cheryl M. Corcoran, Facundo Carrillo, Diego Fern\u00e1ndez-Slezak, Gillinder Bedi, Casimir Klim, Daniel C. Javitt, Carrie E. Bearden, and Guillermo A. Cecchi. 2018. Prediction of psychosis across protocols and risk cohorts using automated language analysis. World Psychiatry, 17:67-75.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Machine learning for suicidology: A practical review of exploratory and hypothesis-driven approaches", |
| "authors": [ |
| { |
| "first": "Christopher", |
| "middle": [ |
| "R" |
| ], |
| "last": "Cox", |
| "suffix": "" |
| }, |
| { |
| "first": "Emma", |
| "middle": [ |
| "H" |
| ], |
| "last": "Moscardini", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Raymond", |
| "middle": [ |
| "P" |
| ], |
| "last": "Tucker", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Clin Psychol Rev", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.cpr.2020.101940" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christopher R. Cox, Emma H. Moscardini, Alex S. Cohen, and Raymond P. Tucker. 2020. Machine learning for suicidology: A practical review of ex- ploratory and hypothesis-driven approaches. Clin Psychol Rev, 82.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "BERT: Pre-training of deep bidirectional transformers for language understanding", |
| "authors": [ |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristina", |
| "middle": [], |
| "last": "Toutanova", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies", |
| "volume": "1", |
| "issue": "", |
| "pages": "4171--4186", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N19-1423" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. 2019. BERT: Pre-training of deep bidirectional transformers for language under- standing. In Proceedings of the 2019 Conference of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies, Volume 1 (Long and Short Papers), pages 4171-4186, Minneapolis, Minnesota. Associ- ation for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "An anomaly detection approach based on isolation forest algorithm for streaming data using sliding window", |
| "authors": [ |
| { |
| "first": "Zhiguo", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Minrui", |
| "middle": [], |
| "last": "Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "PIFAC Proceedings Volumes", |
| "volume": "46", |
| "issue": "", |
| "pages": "12--17", |
| "other_ids": { |
| "DOI": [ |
| "10.3182/20130902-3-CN-3020.00044" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhiguo Ding and Minrui Fei. 2013. An anomaly de- tection approach based on isolation forest algorithm for streaming data using sliding window. In PIFAC Proceedings Volumes, volume 46, pages 12-17.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Quantifying incoherence in speech: an automated methodology and novel application to schizophrenia", |
| "authors": [ |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Terry", |
| "middle": [ |
| "E" |
| ], |
| "last": "Goldberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Schizophrenia Research", |
| "volume": "93", |
| "issue": "", |
| "pages": "304--316", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.schres.2007.03.001" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brita Elvev\u00e5g, Peter W. Foltz, Daniel R. Weinberger, and Terry E. Goldberg. 2007. Quantifying inco- herence in speech: an automated methodology and novel application to schizophrenia. Schizophrenia Research, 93:304-316.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Utility is in the eye of the user: A critique of nlp leaderboards", |
| "authors": [ |
| { |
| "first": "Kawin", |
| "middle": [], |
| "last": "Ethayarajh", |
| "suffix": "" |
| }, |
| { |
| "first": "Dan", |
| "middle": [], |
| "last": "Jurafsky", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "4846--4853", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/2020.emnlp-main.393" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kawin Ethayarajh and Dan Jurafsky. 2020. Utility is in the eye of the user: A critique of nlp leaderboards. In Proceedings of the 2020 Conference on Empiri- cal Methods in Natural Language Processing, pages 4846-4853.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Linguistic markers predict onset of alzheimer's disease. EClinicalMedicine", |
| "authors": [ |
| { |
| "first": "Elif", |
| "middle": [], |
| "last": "Eyigoz", |
| "suffix": "" |
| }, |
| { |
| "first": "Sachin", |
| "middle": [], |
| "last": "Mathur", |
| "suffix": "" |
| }, |
| { |
| "first": "Guillermo Cecchi Mar", |
| "middle": [], |
| "last": "Santamaria", |
| "suffix": "" |
| }, |
| { |
| "first": "Melissa", |
| "middle": [], |
| "last": "Naylor", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "28", |
| "issue": "", |
| "pages": "100583", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.eclinm.2020.100583" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elif Eyigoz, Sachin Mathur, Guillermo Cecchi Mar Santamaria, and Melissa Naylor. 2020. Lin- guistic markers predict onset of alzheimer's disease. EClinicalMedicine, 28:100583.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Improving performance of automated scoring through detection of outliers and understanding model instabilities", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Peter", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Karen", |
| "middle": [ |
| "E" |
| ], |
| "last": "Rosenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lochbaum", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Presented at the National Council on Measurement in Education Conference", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter W. Foltz, Mark Rosenstein, and Karen E. Lochbaum. 2013. Improving performance of au- tomated scoring through detection of outliers and understanding model instabilities. In Presented at the National Council on Measurement in Education Conference, San Francisco, CA.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Governing ai-driven health research: Are irbs up to the task?", |
| "authors": [ |
| { |
| "first": "Phoebe", |
| "middle": [], |
| "last": "Friesen", |
| "suffix": "" |
| }, |
| { |
| "first": "Rachel", |
| "middle": [], |
| "last": "Douglas-Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Mason", |
| "middle": [], |
| "last": "Marks", |
| "suffix": "" |
| }, |
| { |
| "first": "Robin", |
| "middle": [], |
| "last": "Pierce", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Fletcher", |
| "suffix": "" |
| }, |
| { |
| "first": "Abhishek", |
| "middle": [], |
| "last": "Mishra", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "Lorimer", |
| "suffix": "" |
| }, |
| { |
| "first": "Carissa", |
| "middle": [], |
| "last": "V\u00e9liz", |
| "suffix": "" |
| }, |
| { |
| "first": "Nina", |
| "middle": [], |
| "last": "Hallowell", |
| "suffix": "" |
| }, |
| { |
| "first": "Mackenzie", |
| "middle": [], |
| "last": "Graham", |
| "suffix": "" |
| }, |
| { |
| "first": "Mei", |
| "middle": [ |
| "Sum" |
| ], |
| "last": "Chan", |
| "suffix": "" |
| }, |
| { |
| "first": "Huw", |
| "middle": [], |
| "last": "Davies", |
| "suffix": "" |
| }, |
| { |
| "first": "Taj", |
| "middle": [], |
| "last": "Sallamuddin", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "Ethics Hum Res", |
| "volume": "43", |
| "issue": "", |
| "pages": "35--42", |
| "other_ids": { |
| "DOI": [ |
| "10.1002/eahr.500085" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Phoebe Friesen, Rachel Douglas-Jones, Mason Marks, Robin Pierce, Katherine Fletcher, Abhishek Mishra, Jessica Lorimer, Carissa V\u00e9liz, Nina Hallowell, Mackenzie Graham, Mei Sum Chan, Huw Davies, and Taj Sallamuddin. 2021. Governing ai-driven health research: Are irbs up to the task? Ethics Hum Res, 43:35-42.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Chaos theory and the study of human behavior: Implications for special education and developmental disabilities", |
| "authors": [ |
| { |
| "first": "Doug", |
| "middle": [], |
| "last": "Guess", |
| "suffix": "" |
| }, |
| { |
| "first": "Wayne", |
| "middle": [], |
| "last": "Sailor", |
| "suffix": "" |
| } |
| ], |
| "year": 1993, |
| "venue": "The Journal of Special Education", |
| "volume": "27", |
| "issue": "", |
| "pages": "16--34", |
| "other_ids": { |
| "DOI": [ |
| "10.1177/002246699302700102" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Doug Guess and Wayne Sailor. 1993. Chaos theory and the study of human behavior: Implications for spe- cial education and developmental disabilities. The Journal of Special Education, 27:16-34.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Classifier technology and the illusion of progress", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hand", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Statistical Science", |
| "volume": "21", |
| "issue": "", |
| "pages": "1--14", |
| "other_ids": { |
| "DOI": [ |
| "10.1214/088342306000000060" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "David J. Hand. 2006. Classifier technology and the il- lusion of progress. Statistical Science, 21:1-14.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "The weirdest people in the world", |
| "authors": [ |
| { |
| "first": "Joseph", |
| "middle": [], |
| "last": "Henrich", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "J" |
| ], |
| "last": "Heine", |
| "suffix": "" |
| }, |
| { |
| "first": "Ara", |
| "middle": [], |
| "last": "Norenzayan", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Behav Brain Sci", |
| "volume": "33", |
| "issue": "", |
| "pages": "61--83", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/S0140525X0999152X" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Joseph Henrich, Steven J. Heine, and Ara Norenzayan. 2010. The weirdest people in the world. Behav Brain Sci, 33:61-83.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Applying speech technologies to assess verbal memory in patients with serious mental illness", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Terje", |
| "suffix": "" |
| }, |
| { |
| "first": "Chelsea", |
| "middle": [], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Chandler", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [ |
| "C" |
| ], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [ |
| "P" |
| ], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/s41746-020-0241-7" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Terje B. Holmlund, Chelsea Chandler, Peter W. Foltz, Alex S. Cohen, Jian Cheng, Jared C. Bernstein, Eliz- abeth P. Rosenfeld, and Brita Elvev\u00e5g. 2020. Ap- plying speech technologies to assess verbal memory in patients with serious mental illness. npj Digital Medicine, 3.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Moving psychological assessment out of the controlled laboratory setting and into the hands of the individual: Practical challenges", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Terje", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Holmlund", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "S" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "H\u00e5vard", |
| "middle": [ |
| "D" |
| ], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "Randi", |
| "middle": [], |
| "last": "Johansen", |
| "suffix": "" |
| }, |
| { |
| "first": "P\u00e5l", |
| "middle": [], |
| "last": "Sigurdsen", |
| "suffix": "" |
| }, |
| { |
| "first": "Dagfinn", |
| "middle": [], |
| "last": "Fugelli", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Bergsager", |
| "suffix": "" |
| }, |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Elizabeth", |
| "middle": [], |
| "last": "Bernstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Rosenfeld", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Psychological Assessment", |
| "volume": "31", |
| "issue": "", |
| "pages": "292--303", |
| "other_ids": { |
| "DOI": [ |
| "10.1037/pas0000647" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Terje B. Holmlund, Peter W. Foltz, Alex S. Cohen, H\u00e5vard D. Johansen, Randi Sigurdsen, P\u00e5l Fugelli, Dagfinn Bergsager, Jian Cheng, Jared Bernstein, Elizabeth Rosenfeld, and Brita Elvev\u00e5g. 2019. Mov- ing psychological assessment out of the controlled laboratory setting and into the hands of the indi- vidual: Practical challenges. Psychological Assess- ment, 31:292-303.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Second opinion needed: communicating uncertainty in medical machine learning", |
| "authors": [ |
| { |
| "first": "Benjamin", |
| "middle": [], |
| "last": "Kompa", |
| "suffix": "" |
| }, |
| { |
| "first": "Jasper", |
| "middle": [], |
| "last": "Snoek", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [ |
| "L" |
| ], |
| "last": "Beam", |
| "suffix": "" |
| } |
| ], |
| "year": 2021, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/s41746-020-00367-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Benjamin Kompa, Jasper Snoek, and Andrew L. Beam. 2021. Second opinion needed: communicat- ing uncertainty in medical machine learning. npj Digital Medicine, 4.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Automatic speech analysis for the assessment of patients with predementia and alzheimer's disease. Alzheimer's Dementia: Diagnosis", |
| "authors": [ |
| { |
| "first": "Alexandra", |
| "middle": [], |
| "last": "K\u00f6nig", |
| "suffix": "" |
| }, |
| { |
| "first": "Aharon", |
| "middle": [], |
| "last": "Satt", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Sorin", |
| "suffix": "" |
| }, |
| { |
| "first": "Ron", |
| "middle": [], |
| "last": "Hoory", |
| "suffix": "" |
| }, |
| { |
| "first": "Orith", |
| "middle": [], |
| "last": "Toledo-Ronen", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexandre", |
| "middle": [], |
| "last": "Derreumaux", |
| "suffix": "" |
| }, |
| { |
| "first": "Valeria", |
| "middle": [], |
| "last": "Manera", |
| "suffix": "" |
| }, |
| { |
| "first": "Frans", |
| "middle": [], |
| "last": "Verhey", |
| "suffix": "" |
| }, |
| { |
| "first": "Pauline", |
| "middle": [], |
| "last": "Aalten", |
| "suffix": "" |
| }, |
| { |
| "first": "Phillipe", |
| "middle": [ |
| "H" |
| ], |
| "last": "Robert", |
| "suffix": "" |
| }, |
| { |
| "first": "Renaud", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Assessment Disease Monitoring", |
| "volume": "1", |
| "issue": "", |
| "pages": "112--124", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.dadm.2014.11.012" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alexandra K\u00f6nig, Aharon Satt, Alexander Sorin, Ron Hoory, Orith Toledo-Ronen, Alexandre Der- reumaux, Valeria Manera, Frans Verhey, Pauline Aalten, Phillipe H. Robert, and Renaud David. 2015. Automatic speech analysis for the assessment of patients with predementia and alzheimer's disease. Alzheimer's Dementia: Diagnosis, Assessment Dis- ease Monitoring, 1:112-124.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Stability-based model selection", |
| "authors": [ |
| { |
| "first": "Tilman", |
| "middle": [], |
| "last": "Lange", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Mikio", |
| "suffix": "" |
| }, |
| { |
| "first": "Volker", |
| "middle": [], |
| "last": "Braun", |
| "suffix": "" |
| }, |
| { |
| "first": "Joachim", |
| "middle": [ |
| "M" |
| ], |
| "last": "Roth", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Buhmann", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 15th International Conference on Neural Information Processing Systems", |
| "volume": "", |
| "issue": "", |
| "pages": "633--642", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/089976604773717621" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tilman Lange, Mikio L. Braun, Volker Roth, and Joachim M. Buhmann. 2002. Stability-based model selection. In Proceedings of the 15th International Conference on Neural Information Processing Sys- tems, pages 633--642.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Neuropsychological assessment (5th Ed.)", |
| "authors": [ |
| { |
| "first": "Muriel", |
| "middle": [ |
| "D" |
| ], |
| "last": "Lezak", |
| "suffix": "" |
| }, |
| { |
| "first": "Diane", |
| "middle": [ |
| "B" |
| ], |
| "last": "Howieson", |
| "suffix": "" |
| }, |
| { |
| "first": "Erin", |
| "middle": [ |
| "D" |
| ], |
| "last": "Bigler", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Tranel", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Muriel D. Lezak, Diane B. Howieson, Erin D. Bigler, and Daniel Tranel. 2012. Neuropsychological as- sessment (5th Ed.). Oxford University Press.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Isolation forest", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Fei", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [ |
| "M" |
| ], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhi-Hua", |
| "middle": [], |
| "last": "Ting", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Proceedings of the Eighth IEEE International Conference on Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "413--422", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICDM.2008.17" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Fei T. Liu, Kai M. Ting, and Zhi-Hua Zhou. 2008. Iso- lation forest. In Proceedings of the Eighth IEEE In- ternational Conference on Data Mining, pages 413- 422, Pisa, Italy.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Where have all the women gone?: participant gender in epidemiological and non-epidemiological research of schizophrenia", |
| "authors": [ |
| { |
| "first": "Julia", |
| "middle": [], |
| "last": "Longenecker", |
| "suffix": "" |
| }, |
| { |
| "first": "Jamie", |
| "middle": [], |
| "last": "Genderson", |
| "suffix": "" |
| }, |
| { |
| "first": "Dwight", |
| "middle": [], |
| "last": "Dickinson", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Malley", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [ |
| "R" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "James", |
| "middle": [], |
| "last": "Gold", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Schizophrenia Research", |
| "volume": "119", |
| "issue": "", |
| "pages": "240--245", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.schres.2010.03.023" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julia Longenecker, Jamie Genderson, Dwight Dickin- son, James Malley, Brita Elvev\u00e5g, Daniel R. Wein- berger, and James Gold. 2010. Where have all the women gone?: participant gender in epidemiologi- cal and non-epidemiological research of schizophre- nia. Schizophrenia Research, 119:240-245.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Giving voice to vulnerable children: Machine learning analysis of speech detects anxiety and depression in early childhood", |
| "authors": [ |
| { |
| "first": "Ellen", |
| "middle": [ |
| "W" |
| ], |
| "last": "Mcginnis", |
| "suffix": "" |
| }, |
| { |
| "first": "Steven", |
| "middle": [ |
| "P" |
| ], |
| "last": "Anderau", |
| "suffix": "" |
| }, |
| { |
| "first": "Jessica", |
| "middle": [], |
| "last": "Hruschak", |
| "suffix": "" |
| }, |
| { |
| "first": "Reed", |
| "middle": [ |
| "D" |
| ], |
| "last": "Gurchiek", |
| "suffix": "" |
| }, |
| { |
| "first": "Nestor", |
| "middle": [ |
| "L" |
| ], |
| "last": "Lopez-Duran", |
| "suffix": "" |
| }, |
| { |
| "first": "Kate", |
| "middle": [], |
| "last": "Fitzgerald", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [ |
| "L" |
| ], |
| "last": "Rosenblum", |
| "suffix": "" |
| }, |
| { |
| "first": "Maria", |
| "middle": [], |
| "last": "Muzik", |
| "suffix": "" |
| }, |
| { |
| "first": "Ryan", |
| "middle": [ |
| "S" |
| ], |
| "last": "Mcginnis", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "IEEE Journal of Biomedical and Health Informatics", |
| "volume": "23", |
| "issue": "", |
| "pages": "2294--2301", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/JBHI.2019.2913590" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ellen W. McGinnis, Steven P. Anderau, Jessica Hr- uschak, Reed D. Gurchiek, Nestor L. Lopez-Duran, Kate Fitzgerald, Katherine L. Rosenblum, Maria Muzik, and Ryan S. McGinnis. 2019. Giving voice to vulnerable children: Machine learning analysis of speech detects anxiety and depression in early child- hood. IEEE Journal of Biomedical and Health In- formatics, 23:2294-2301.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Use of emergency department electronic medical records for automated epidemiological surveillance of suicide attempts: a french pilot study", |
| "authors": [ |
| { |
| "first": "Marie-H\u00e9l\u00e8ne", |
| "middle": [], |
| "last": "Metzger", |
| "suffix": "" |
| }, |
| { |
| "first": "Nastassia", |
| "middle": [], |
| "last": "Tvardik", |
| "suffix": "" |
| }, |
| { |
| "first": "Quentin", |
| "middle": [], |
| "last": "Gicquel", |
| "suffix": "" |
| }, |
| { |
| "first": "C\u00f4me", |
| "middle": [], |
| "last": "Bouvry", |
| "suffix": "" |
| }, |
| { |
| "first": "Emmanuel", |
| "middle": [], |
| "last": "Poulet", |
| "suffix": "" |
| }, |
| { |
| "first": "V\u00e9ronique", |
| "middle": [], |
| "last": "Potinet-Pagliaroli", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "International Journal of Methods in Psychiatric Research", |
| "volume": "26", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1002/mpr.1522" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marie-H\u00e9l\u00e8ne Metzger, Nastassia Tvardik, Quentin Gicquel, C\u00f4me Bouvry, Emmanuel Poulet, and V\u00e9ronique Potinet-Pagliaroli. 2017. Use of emer- gency department electronic medical records for au- tomated epidemiological surveillance of suicide at- tempts: a french pilot study. International Journal of Methods in Psychiatric Research, 26:e1522.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Practical solutions for machine learning safety in autonomous vehicles", |
| "authors": [ |
| { |
| "first": "Sina", |
| "middle": [], |
| "last": "Mohseni", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Pitale", |
| "suffix": "" |
| }, |
| { |
| "first": "Vasu", |
| "middle": [], |
| "last": "Singh", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhangyang", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "The AAAI Workshop on Artificial Intelligence Safety", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sina Mohseni, Mandar Pitale, Vasu Singh, and Zhangyang Wang. 2020. Practical solutions for ma- chine learning safety in autonomous vehicles. In The AAAI Workshop on Artificial Intelligence Safety (Safe AI).", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Deep language space neural network for classifying mild cognitive impairment and alzheimer-type dementia", |
| "authors": [ |
| { |
| "first": "Jojo", |
| "middle": [], |
| "last": "Sylvester Olubolu Orimaye", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sze-Meng", |
| "suffix": "" |
| }, |
| { |
| "first": "Chee Piau", |
| "middle": [], |
| "last": "Wong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wong", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "PLoS ONE", |
| "volume": "13", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pone.0205636" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sylvester Olubolu Orimaye, Jojo Sze-Meng Wong, and Chee Piau Wong. 2018. Deep language space neu- ral network for classifying mild cognitive impair- ment and alzheimer-type dementia. PLoS ONE, 13:e0205636.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Chaos and schizophrenia: does the method fit the madness?", |
| "authors": [ |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "L" |
| ], |
| "last": "Paulus", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Braff", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Neuroscience Perspectives", |
| "volume": "53", |
| "issue": "", |
| "pages": "3--11", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/S0006-3223(02)01701-8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Martin T. Paulus and David L. Braff. 2003. Chaos and schizophrenia: does the method fit the madness? Neuroscience Perspectives, 53:3-11.", |
| "links": null |
| }, |
| "BIBREF34": { |
| "ref_id": "b34", |
| "title": "Multi-kernel svm based depression recognition using social media data", |
| "authors": [ |
| { |
| "first": "Zhichao", |
| "middle": [], |
| "last": "Peng", |
| "suffix": "" |
| }, |
| { |
| "first": "Qinghua", |
| "middle": [], |
| "last": "Hu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jianwu", |
| "middle": [], |
| "last": "Dang", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "International Journal of Machine Learning and Cybernetics", |
| "volume": "10", |
| "issue": "", |
| "pages": "43--57", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s13042-017-0697-1" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhichao Peng, Qinghua Hu, and Jianwu Dang. 2019. Multi-kernel svm based depression recognition us- ing social media data. International Journal of Ma- chine Learning and Cybernetics, 10:43-57.", |
| "links": null |
| }, |
| "BIBREF35": { |
| "ref_id": "b35", |
| "title": "Spoken language derived measures for detecting mild cognitive impairment", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Roark", |
| "suffix": "" |
| }, |
| { |
| "first": "Margaret", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "John-Paul", |
| "middle": [], |
| "last": "Hosom", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristy", |
| "middle": [], |
| "last": "Hollingshead", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Kaye", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "IEEE Transactions on Audio, Speech, and Language Processing", |
| "volume": "19", |
| "issue": "", |
| "pages": "2081--2090", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/TASL.2011.2112351" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Roark, Margaret Mitchell, John-Paul Hosom, Kristy Hollingshead, and Jeffrey Kaye. 2011. Spo- ken language derived measures for detecting mild cognitive impairment. IEEE Transactions on Audio, Speech, and Language Processing, 19:2081--2090.", |
| "links": null |
| }, |
| "BIBREF36": { |
| "ref_id": "b36", |
| "title": "Language as a biomarker in those at high-risk for psychosis. Schizophrenia Research", |
| "authors": [ |
| { |
| "first": "Mark", |
| "middle": [], |
| "last": "Rosenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [ |
| "W" |
| ], |
| "last": "Foltz", |
| "suffix": "" |
| }, |
| { |
| "first": "Lynn", |
| "middle": [ |
| "E" |
| ], |
| "last": "Delisi", |
| "suffix": "" |
| }, |
| { |
| "first": "Brita", |
| "middle": [], |
| "last": "Elvev\u00e5g", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "165", |
| "issue": "", |
| "pages": "249--250", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.schres.2015.04.023" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mark Rosenstein, Peter W. Foltz, Lynn E. DeLisi, and Brita Elvev\u00e5g. 2015. Language as a biomarker in those at high-risk for psychosis. Schizophrenia Re- search, 165:249--250.", |
| "links": null |
| }, |
| "BIBREF37": { |
| "ref_id": "b37", |
| "title": "Quantifying the impact of chronic conditions on a diagnosis of major depressive disorder in adults: a cohort study using linked electronic medical records", |
| "authors": [ |
| { |
| "first": "Euijung", |
| "middle": [], |
| "last": "Ryu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alanna", |
| "middle": [ |
| "M" |
| ], |
| "last": "Chamberlain", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [ |
| "S" |
| ], |
| "last": "Pendegraft", |
| "suffix": "" |
| }, |
| { |
| "first": "Tanya", |
| "middle": [ |
| "M" |
| ], |
| "last": "Petterson", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "V" |
| ], |
| "last": "Bobo", |
| "suffix": "" |
| }, |
| { |
| "first": "Jyotishman", |
| "middle": [], |
| "last": "Pathak", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "BMC Psychiatry", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1186/s12888-016-0821-x" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Euijung Ryu, Alanna M. Chamberlain, Richard S. Pen- degraft, Tanya M. Petterson, William V. Bobo, and Jyotishman Pathak. 2016. Quantifying the impact of chronic conditions on a diagnosis of major depres- sive disorder in adults: a cohort study using linked electronic medical records. BMC Psychiatry, 16.", |
| "links": null |
| }, |
| "BIBREF38": { |
| "ref_id": "b38", |
| "title": "Machine learning in mental health: A scoping review of methods and applications", |
| "authors": [ |
| { |
| "first": "B", |
| "middle": [ |
| "R" |
| ], |
| "last": "Adrian", |
| "suffix": "" |
| }, |
| { |
| "first": "Delyse", |
| "middle": [ |
| "M" |
| ], |
| "last": "Shatte", |
| "suffix": "" |
| }, |
| { |
| "first": "Samantha", |
| "middle": [ |
| "J" |
| ], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Teague", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Psychological Medicine", |
| "volume": "49", |
| "issue": "", |
| "pages": "1426--1448", |
| "other_ids": { |
| "DOI": [ |
| "10.1017/S0033291719000151" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adrian B. R. Shatte, Delyse M. Hutchinson, and Samantha J. Teague. 2019. Machine learning in mental health: A scoping review of methods and ap- plications. Psychological Medicine, 49:1426-1448.", |
| "links": null |
| }, |
| "BIBREF39": { |
| "ref_id": "b39", |
| "title": "Deep learning in mental health outcome research: a scoping review", |
| "authors": [ |
| { |
| "first": "Chang", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenxing", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Jyotishman", |
| "middle": [], |
| "last": "Pathak", |
| "suffix": "" |
| }, |
| { |
| "first": "Fei", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Transl Psychiatry", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1038/s41398-020-0780-3" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Chang Su, Zhenxing Xu, Jyotishman Pathak, and Fei Wang. 2020. Deep learning in mental health out- come research: a scoping review. Transl Psychiatry, 10.", |
| "links": null |
| }, |
| "BIBREF40": { |
| "ref_id": "b40", |
| "title": "Machine learning in mental health: A systematic review of the hci literature to support the development of effective and implementable ml systems", |
| "authors": [ |
| { |
| "first": "Anja", |
| "middle": [], |
| "last": "Thieme", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Belgrave", |
| "suffix": "" |
| }, |
| { |
| "first": "Gavin", |
| "middle": [], |
| "last": "Doherty", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "ACM Transactions on Computer-Human Interaction", |
| "volume": "27", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3398069" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anja Thieme, Danielle Belgrave, and Gavin Doherty. 2020. Machine learning in mental health: A system- atic review of the hci literature to support the devel- opment of effective and implementable ml systems. ACM Transactions on Computer-Human Interaction, 27:Article 34.", |
| "links": null |
| }, |
| "BIBREF41": { |
| "ref_id": "b41", |
| "title": "Technical note: Bias and the quantification of stability", |
| "authors": [ |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Turney", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "Machine Learning", |
| "volume": "20", |
| "issue": "", |
| "pages": "23--33", |
| "other_ids": { |
| "DOI": [ |
| "10.1023/A:1022682001417" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Peter Turney. 1995. Technical note: Bias and the quantification of stability. Machine Learning, 20:23-33.", |
| "links": null |
| }, |
| "BIBREF42": { |
| "ref_id": "b42", |
| "title": "Extracting diagnoses and investigation results from unstructured text in electronic health records by semi-supervised machine learning", |
| "authors": [ |
| { |
| "first": "Zhuoran", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Anoop", |
| "middle": [ |
| "D" |
| ], |
| "last": "Shah", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [ |
| "Rosemary" |
| ], |
| "last": "Tate", |
| "suffix": "" |
| }, |
| { |
| "first": "Spiros", |
| "middle": [], |
| "last": "Denaxas", |
| "suffix": "" |
| }, |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Shawe-Taylor", |
| "suffix": "" |
| }, |
| { |
| "first": "Harry", |
| "middle": [], |
| "last": "Hemingway", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "PLoS One", |
| "volume": "7", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1371/journal.pone.0030412" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuoran Wang, Anoop D. Shah, A. Rosemary Tate, Spiros Denaxas, John Shawe-Taylor, and Harry Hemingway. 2012. Extracting diagnoses and investigation results from unstructured text in electronic health records by semi-supervised machine learning. PLoS One, 7:e30412.", |
| "links": null |
| }, |
| "BIBREF43": { |
| "ref_id": "b43", |
| "title": "Wechsler Memory Scale - Third Edition, WMS-III: Administration and scoring manual. The Psychological Corporation", |
| "authors": [ |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Wechsler", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "David Wechsler. 1997. Wechsler Memory Scale - Third Edition, WMS-III: Administration and scoring manual. The Psychological Corporation.", |
| "links": null |
| }, |
| "BIBREF44": { |
| "ref_id": "b44", |
| "title": "When optimism hurts: inflated predictions in psychiatric neuroimaging", |
| "authors": [ |
| { |
| "first": "Robert", |
| "middle": [], |
| "last": "Whelan", |
| "suffix": "" |
| }, |
| { |
| "first": "Hugh", |
| "middle": [], |
| "last": "Garavan", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Biol Psychiatry", |
| "volume": "75", |
| "issue": "", |
| "pages": "746--748", |
| "other_ids": { |
| "DOI": [ |
| "10.1016/j.biopsych.2013.05.014" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Robert Whelan and Hugh Garavan. 2014. When optimism hurts: inflated predictions in psychiatric neuroimaging. Biol Psychiatry, 75:746-748.", |
| "links": null |
| }, |
| "BIBREF45": { |
| "ref_id": "b45", |
| "title": "Detecting causality from online psychiatric texts using inter-sentential language patterns", |
| "authors": [ |
| { |
| "first": "Jheng-Long", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Liang-Chih", |
| "middle": [], |
| "last": "Yu", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Chann", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "BMC Medical Informatics and Decision Making", |
| "volume": "12", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1186/1472-6947-12-72" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jheng-Long Wu, Liang-Chih Yu, and Pei-Chann Chang. 2012. Detecting causality from online psychiatric texts using inter-sentential language patterns. BMC Medical Informatics and Decision Making, 12.", |
| "links": null |
| }, |
| "BIBREF46": { |
| "ref_id": "b46", |
| "title": "Bertscore: Evaluating text generation with bert", |
| "authors": [ |
| { |
| "first": "Tianyi", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Varsha", |
| "middle": [], |
| "last": "Kishore", |
| "suffix": "" |
| }, |
| { |
| "first": "Felix", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| }, |
| { |
| "first": "Kilian", |
| "middle": [ |
| "Q" |
| ], |
| "last": "Weinberger", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoav", |
| "middle": [], |
| "last": "Artzi", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tianyi Zhang, Varsha Kishore, Felix Wu, Kilian Q. Weinberger, and Yoav Artzi. 2020. Bertscore: Evaluating text generation with bert. In International Conference on Learning Representations.", |
| "links": null |
| }, |
| "BIBREF47": { |
| "ref_id": "b47", |
| "title": "Class noise vs. attribute noise: A quantitative study", |
| "authors": [ |
| { |
| "first": "Xingquan", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Xindong", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Artificial Intelligence Review", |
| "volume": "22", |
| "issue": "", |
| "pages": "177--210", |
| "other_ids": { |
| "DOI": [ |
| "10.1007/s10462-004-0751-8" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xingquan Zhu and Xindong Wu. 2004. Class noise vs. attribute noise: A quantitative study. Artificial Intelligence Review, 22:177-210.", |
| "links": null |
| }, |
| "BIBREF48": { |
| "ref_id": "b48", |
| "title": "Clpsych 2019 shared task: Predicting the degree of suicide risk in reddit posts", |
| "authors": [ |
| { |
| "first": "Ayah", |
| "middle": [], |
| "last": "Zirikly", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Resnik", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d6zlem", |
| "middle": [], |
| "last": "Uzuner", |
| "suffix": "" |
| }, |
| { |
| "first": "Kristy", |
| "middle": [], |
| "last": "Hollingshead", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology", |
| "volume": "", |
| "issue": "", |
| "pages": "24--33", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/W19-3003" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ayah Zirikly, Philip Resnik, \u00d6zlem Uzuner, and Kristy Hollingshead. 2019. Clpsych 2019 shared task: Predicting the degree of suicide risk in reddit posts. In Proceedings of the Sixth Workshop on Computational Linguistics and Clinical Psychology, pages 24-33.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "num": null, |
| "uris": null, |
| "text": "Scatter plot depicting the relationship between the number of common word types and the BERTScore of each example. The color represents the absolute error between the model rating and the human rating in each instance. Cross symbols indicate attribute noise (with two specific examples colored red and detailed at a high level in the text)." |
| }, |
| "TABREF2": { |
| "num": null, |
| "html": null, |
| "type_str": "table", |
| "content": "<table/>", |
| "text": "" |
| } |
| } |
| } |
| } |