| { |
| "paper_id": "2020", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T12:26:55.781544Z" |
| }, |
| "title": "Investigating Rich Feature Sources for Conceptual Representation Encoding", |
| "authors": [ |
| { |
| "first": "Lu", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Singapore University of Technology and Design", |
| "location": { |
| "country": "Singapore" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yulong", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Zhejiang University", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Dandan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Westlake University", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Westlake University", |
| "location": { |
| "country": "China" |
| } |
| }, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Functional Magnetic Resonance Imaging (fMRI) provides a means to investigate human conceptual representation in cognitive and neuroscience studies, where researchers predict the fMRI activations with elicited stimuli inputs. Previous work mainly uses a single source of features, particularly linguistic features, to predict fMRI activations. However, relatively little work has been done on investigating rich-source features for conceptual representation. In this paper, we systematically compare the linguistic, visual as well as auditory input features in conceptual representation, and further introduce associative conceptual features, which are obtained from Small World of Words game, to predict fMRI activations. Our experimental results show that those rich-source features can enhance performance in predicting the fMRI activations. Our analysis indicates that information from rich sources is present in the conceptual representation of human brains. In particular, the visual feature weights the most on conceptual representation, which is consistent with the recent cognitive science study.", |
| "pdf_parse": { |
| "paper_id": "2020", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Functional Magnetic Resonance Imaging (fMRI) provides a means to investigate human conceptual representation in cognitive and neuroscience studies, where researchers predict the fMRI activations with elicited stimuli inputs. Previous work mainly uses a single source of features, particularly linguistic features, to predict fMRI activations. However, relatively little work has been done on investigating rich-source features for conceptual representation. In this paper, we systematically compare the linguistic, visual as well as auditory input features in conceptual representation, and further introduce associative conceptual features, which are obtained from Small World of Words game, to predict fMRI activations. Our experimental results show that those rich-source features can enhance performance in predicting the fMRI activations. Our analysis indicates that information from rich sources is present in the conceptual representation of human brains. In particular, the visual feature weights the most on conceptual representation, which is consistent with the recent cognitive science study.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "How a simple concept is represented and organized by human brain has been of long research interest in cognitive science and natural language processing (NLP) (Ishai et al., 1999; Martin, 2007; Fernandino et al., 2016) . The rise of brain imaging methods such as fMRI technology has now made it feasible to investigate conceptual representation within human brain. In particular, fMRI is a technique that allows for the visualization of neuron activity in brain regions, which has become an essential tool for analyzing the neural correlates of brain activity in recent decades (Mitchell et al., 2004; Mitchell et al., 2008; Pereira et al., 2009; Pereira et al., 2011; Just et al., 2010) .", |
| "cite_spans": [ |
| { |
| "start": 159, |
| "end": 179, |
| "text": "(Ishai et al., 1999;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 180, |
| "end": 193, |
| "text": "Martin, 2007;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 194, |
| "end": 218, |
| "text": "Fernandino et al., 2016)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 578, |
| "end": 601, |
| "text": "(Mitchell et al., 2004;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 602, |
| "end": 624, |
| "text": "Mitchell et al., 2008;", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 625, |
| "end": 646, |
| "text": "Pereira et al., 2009;", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 647, |
| "end": 668, |
| "text": "Pereira et al., 2011;", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 669, |
| "end": 687, |
| "text": "Just et al., 2010)", |
| "ref_id": "BIBREF17" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Neuroscientists have shown that distinct patterns of neural activation are associated with both encoding and decoding the concepts of different semantic categories in brains. Mitchell et al. (2008) first introduced the task of predicting fMRI activation and proposed a featured-based model which takes a semantic representation of a single noun to predict the fMRI activation elicited by that noun. Subsequent studies (Pereira et al., 2018) introduced distributed based methods to build correlations between distributed semantic representations and patterns of neural activation. However, previous work mostly focuses on a single source of input features, e.g. count-based word vectors (Devereux et al., 2010; Murphy et al., 2012; Pereira et al., 2013; Pereira et al., 2018) to explore the in brain encoding process, which builds correlation between neural signals and distributed representation, and thus can be useful for better understanding both the brain and the word representation. But there has been little work systematically investigating the effect of different modalities on predicting fMRI activations.", |
| "cite_spans": [ |
| { |
| "start": 175, |
| "end": 197, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 418, |
| "end": 440, |
| "text": "(Pereira et al., 2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 686, |
| "end": 709, |
| "text": "(Devereux et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 710, |
| "end": 730, |
| "text": "Murphy et al., 2012;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 731, |
| "end": 752, |
| "text": "Pereira et al., 2013;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 753, |
| "end": 774, |
| "text": "Pereira et al., 2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We address this limitation by empirically investigating two forms of rich source features: multimodal features and associative conceptual feature. First, we systematically compare input features that come from linguistic, visual and auditory sources into fMRI activation encoding. To investigate the influence of each source of information in the brain conceptual representation, we build and evaluate a multimodal", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Stimuli Presentation mode Subj. Mitchell et al. (2008) 60 concrete nouns Word, Image 9 Pereira et al. (2018) 180 words Word cloud, Sentence, Image 16 (Mitchell et al., 2008 ).", |
| "cite_spans": [ |
| { |
| "start": 32, |
| "end": 54, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 87, |
| "end": 108, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 150, |
| "end": 172, |
| "text": "(Mitchell et al., 2008", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reference", |
| "sec_num": null |
| }, |
| { |
| "text": "conceptual representation model with different modal input features and their combinations. Second, we investigate associative thinking of related concepts. We assume that associative thinking for concepts has individual difference, and it is insufficient to reflect such differences via distributed semantics representation. To verify this assumption, we propose an associative conceptual embedding that predicts brain activity by using associative conceptual words other than the concept presented to the subjects when collecting the brain activity data. Experiments of multi-sense representation show that not only linguistic features, but also visual and auditory features, can be used to predict fMRI activations. It demonstrates that multimodal information is present in the conceptual representation in human brains, and we also observe that the weights of various modalities in brain conceptual representation are unequal. In particular, we find that performances of visual feature grounded multimodal models are overall improved compared with unimodal models, while the performances of auditory feature grounded models are not consistently improved. This observation leads to a conclusion that the visual information weights the most in brain conceptual representations. In addition, experiments of associative conceptual representation show that the associative conceptual words, which though are distinct in distributed semantic vector space, are related in conceptual representation in human brains.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Reference", |
| "sec_num": null |
| }, |
| { |
| "text": "Previous studies on conceptual representation mainly focus on correlation between words and corresponding fMRI activations, including feature based methods and distributed representation based methods. Seminal work of Mitchell et al. (2008) pioneered the use of corpus-derived word representations to predict brain activation data associated with the meaning of nouns. This feature based method selected 25 verbs (i.e., 'see ', 'say', 'taste'.) , and calculated the co-occurrence frequency of the noun with each of 25 verbs. In this regard, a noun word is encoded into 25 sensor-motor features. Subsequent work including Jelodar et al. (2010) used WordNet (Miller, 1995) to compute the values of the features. Obviously, such feature based methods are constrained by corpora, and only focus on linguistic unimodal. Pereira et al. (2013) proposed a distributed semantics based method using features learnt form Wikipeida to predict neural activations for unseen concepts. Since then, various studies have shown that distributed semantic representations have correlations with brain concept representation (Devereux et al., 2010; Murphy et al., 2012; Pereira et al., 2013; Pereira et al., 2018; Bulat et al., 2017) . However, though these methods outperform the feature based methods, they still ignore the fact that the information in the real world comes as different modalities. In contrast to their work, we investigate the human conceptual representation mechanism via evaluating the effects of multimodal features rather than only unimodal linguistic feature.", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 240, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 425, |
| "end": 444, |
| "text": "', 'say', 'taste'.)", |
| "ref_id": null |
| }, |
| { |
| "start": 656, |
| "end": 670, |
| "text": "(Miller, 1995)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 815, |
| "end": 836, |
| "text": "Pereira et al. (2013)", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1104, |
| "end": 1127, |
| "text": "(Devereux et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 1128, |
| "end": 1148, |
| "text": "Murphy et al., 2012;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 1149, |
| "end": 1170, |
| "text": "Pereira et al., 2013;", |
| "ref_id": "BIBREF32" |
| }, |
| { |
| "start": 1171, |
| "end": 1192, |
| "text": "Pereira et al., 2018;", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 1193, |
| "end": 1212, |
| "text": "Bulat et al., 2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "More closely related to our work, Bulat et al. (2017) presented a systematic evaluation and comparison of unimodal and multimodal semantic models in their ability to predict patterns of conceptual representation in the human brain. However, they only focused on the model level, contrasting unimodal representations and multimodal representations that involve linguistic and visual signals, but not the effect of each modality. While little previous work studied the influence of each source of information in the brain conceptual representation, our study is more extensive by evaluating multiple modalities data and their combinations. To our knowledge, we are the first to report auditory data in exploring human conceptual representations. More vitally, we explore their importance in concrete noun representations. Different from all work above, we are also the first to introduce associative conceptual words as input features to human conceptual representation.", |
| "cite_spans": [ |
| { |
| "start": 34, |
| "end": 53, |
| "text": "Bulat et al. (2017)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "The task is to predict the corresponding fMRI activations with elicited stimuli. The encoder operates by predicting fMRI activation given feature vectors. Each dimension (voxel) of fMRI activation is predicted by using a separate ridge regression estimator. More formally, given the matrix X and the matrix Z, we learn regression coefficients b and b 0 that minimize", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task: Predicting the fMRI Activation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "Xb + b 0 \u2212 z 2 + \u03b1 b 2", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Task: Predicting the fMRI Activation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "for each column of z of Z matrix. X is the semantic matrix, the dimension is the number of words (training set) by the dimension of semantic vector (300 for GloVe); and Z is the corresponding fMRI activation matrix, the dimension is the number of fMRI activation by the imaging dimension (amount of selected voxel, 500 for Mitchell et al. (2008) dataset and 5000 for Pereira et al. (2018) dataset). We investigate three types of multi-sense inputs, namely, linguistic, visual and auditory sources. And further we use associative conceptual input, namely, the associative conceptual words which is obtained from Small World of Word game. In the next two sections, we will introduce how to obtain multi-sense representations and associative conceptual representations.", |
| "cite_spans": [ |
| { |
| "start": 323, |
| "end": 345, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Task: Predicting the fMRI Activation", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Following Bruni et al. (2014) and Kiela and Bottou (2014) , we construct multimodal semantic representation vector, V m , by concatenating the linguistic, visual and auditory representations as shown in Figure 1 :", |
| "cite_spans": [ |
| { |
| "start": 10, |
| "end": 29, |
| "text": "Bruni et al. (2014)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 34, |
| "end": 57, |
| "text": "Kiela and Bottou (2014)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 203, |
| "end": 212, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Multi-Sense Representations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "V m = V linguistic V visual V auditory ,", |
| "eq_num": "(2)" |
| } |
| ], |
| "section": "Multi-Sense Representations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where is the concatenation operator.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Sense Representations", |
| "sec_num": "4" |
| }, |
| { |
| "text": "The linguistic representation can be a dense vector that represents a word associated with a concept. Distributed word representations have been applied to statistical language modeling with considerable success (Bengio et al., 2003) . This idea has enabled a substantial amount of progress in a wide range of NLP task, and was also shown useful for brain conceptual representation (Devereux et al., 2010; Murphy et al., 2012) . The approach is based on the distributional hypothesis (Firth, 1957; Harris, 1954) which assumes that words with similar contexts tend to have similar semantic meaning. The intuition underlying the model is ratios of word-word co-occurrence probabilities have the potential for encoding some form of meaning. GloVe (Pennington et al., 2014) provides multiple versions of pre-trained word embeddings. In this paper, we use a 300-dimensional version of GloVe, which trained on a corpus consisting of Wikipedia 2014 and Gigaword 5.", |
| "cite_spans": [ |
| { |
| "start": 212, |
| "end": 233, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 382, |
| "end": 405, |
| "text": "(Devereux et al., 2010;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 406, |
| "end": 426, |
| "text": "Murphy et al., 2012)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 484, |
| "end": 497, |
| "text": "(Firth, 1957;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 498, |
| "end": 511, |
| "text": "Harris, 1954)", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 744, |
| "end": 769, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Linguistic Representations", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Visual representation is used to represent an image associated with a concept in a dense vector. Our approach to constructing the visual representations component is to utilize a collection of images associated with words representing a particular concept. For example, given a stimulus 'carrot', the associated images are a collection of 'carrot' images that we retrieve from the dataset. In our implementation, we use Deep Residual Network (ResNet) (He et al., 2016) to produce the image feature map.", |
| "cite_spans": [ |
| { |
| "start": 451, |
| "end": 468, |
| "text": "(He et al., 2016)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visual Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "ResNet is widely used in image recognition as it is a deep neural network with many convolution layers stack together and can extract rich image features. The network is pre-trained on ImageNet (Deng et al., 2009) , one of the largest image databases. Then, we chop the last layer of the network and use the remaining part as the feature extractor to compute the 2048-dimensional feature vector for each image. To represent a particular concept, we extract the image features of all images belong to that concept. Then, we directly compute the average of all image features as the visual representation.", |
| "cite_spans": [ |
| { |
| "start": 194, |
| "end": 213, |
| "text": "(Deng et al., 2009)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Visual Representations", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Auditory representation is a dense vector used to present the acoustic properties of a concept. For example, given the concept 'key', correlated sounds are keys hitting or rubbing together; and for 'hand', correlated sounds can be applause. For the auditory representations, we retrieve 3 to 100 audios from Freesound (Font et al., 2013) for each concept. To generate the auditory representation for each noun, we first obtain Mel-scale Frequency Cepstral Coefficients (MFCCs) (O'Shaughnessy, 1987) features of each audio and then quantize the features into a bag of audio words (BoAW) (Foote, 1997) representations. MFCCs are commonly used as features in speech recognition, information retrieval, and music analysis. After obtaining a BoAW set, we take the mean of each BoAW as the auditory representation. In this paper, we use MMFeat (Kiela, 2016) to generate 300-dimensional auditory representations. The code is available at https://github.com/douwekiela/mmfeat.", |
| "cite_spans": [ |
| { |
| "start": 318, |
| "end": 337, |
| "text": "(Font et al., 2013)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 586, |
| "end": 599, |
| "text": "(Foote, 1997)", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 838, |
| "end": 851, |
| "text": "(Kiela, 2016)", |
| "ref_id": "BIBREF20" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Auditory Representations", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "Associative conceptual representation is a dense vector obtained from the associative conceptual words that are produced by humans in a game scene, and it is used to presented human's associative thinking related a concept. To investigate that whether associative thinking can be reflected in the fMRI activation, we fuse the word vectors linearly and use it as our associative conceptual representations. The linear fusion is represented as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Associative Conceptual Representation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "V m = V stimuli V associate ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Associative Conceptual Representation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "where is the concatenation operator.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Associative Conceptual Representation", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We apply three sources of features to predict fMRI activations with unimodal model and multimodal models, and compare their performances. Further, we compare the performances of models with irrelevant words and associative conceptual words as inputs respectively. Figure 2 : Mean \u00b1 SE accuracies of participants for all modals of data, using results in Table 3 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 264, |
| "end": 272, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 353, |
| "end": 360, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "6" |
| }, |
| { |
| "text": "6.1 Datasets", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments", |
| "sec_num": "6" |
| }, |
| { |
| "text": "In this paper, we use the fMRI activation datasets of Mitchell et al. (2008) and Pereira et al. (2018) . The summary of the datasets is shown in Table 1 . Mitchell et al. (2008) 's fMRI activation dataset was collected from nine right-handed subjects (5 females and 4 males between 18 and 32 years old). Each time, every subject was presented with noun labels and line drawings of 60 concrete objects from 12 semantic categories with 5 exemplars per category and the corresponding fMRI activation was recorded. The 60 concrete nouns and categories are shown in Table 2 . Each exemplar was presented six times with randomly permutation and each exemplar was presented 3 seconds followed by a 7 seconds rest period. During the exemplar presenting, subjects were required to think about the proprieties of it freely. For example, for the concept 'dog', the proprieties might be 'pet', 'fluffy', and 'labrador retrievers'. It is not required to obtain consistency properties across subjects. Given an exemplar, the fMRI activation of each subject was recorded during the presenting each of the six times. In this paper, we create one representative fMRI activation for each exemplar by averaging six scans.", |
| "cite_spans": [ |
| { |
| "start": 54, |
| "end": 76, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 81, |
| "end": 102, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 155, |
| "end": 177, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 145, |
| "end": 152, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 561, |
| "end": 568, |
| "text": "Table 2", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "fMRI Datasets", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "Pereira et al. (2018)'s fMRI activation dataset was collected from 16 subjects. Similarly to Mitchell et al. (2008) , subjects were asked to think about the properties when they were presented with stimulus in form of words, pictures and sentences. But the exemplar words of Pereira et al. (2018) cover a broader semantic vector space and are more distinct in vector space. First, they applied 300-dimensional GloVe (Pennington et al., 2014) to obtain semantic vectors for all words in a vocabulary size of approximately 30,000 words (Brysbaert et al., 2013) . They then utilized spectral clustering (Luxburg, 2007) to group the vectors into 180 regions, and hand-selected 180 representative words for each regions. -A 4 man-made 27 24 27 26 25 26 27 building 38 31 38 31 33 32 30 build part 56 64 40 62 48 62 61 tool 44 56 40 62 44 46 50 furniture 36 47 50 47 40 44 45 animal 22 34 36 35 32 36 33 kitchen 16 17 19 12 13 12 11 vehicle 50 40 37 42 44 37 34 insect 38 34 38 34 42 33 36 vegetable 32 33 49 30 48 42 37 body part 58 30 48 33 50 28 32 clothing 44 52 39 51 44 45 47 1 LINGUISTIC+VISUAL 2 LINGUISTIC+AUDITORY 3 VISUAL+AUDITORY 4 LINGUISTIC+VISUAL+AUDITORY MOST ERROR LEAST ERROR Table 4 : Selected within-category error statistics.", |
| "cite_spans": [ |
| { |
| "start": 93, |
| "end": 115, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 416, |
| "end": 441, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 534, |
| "end": 558, |
| "text": "(Brysbaert et al., 2013)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 600, |
| "end": 615, |
| "text": "(Luxburg, 2007)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 716, |
| "end": 1276, |
| "text": "-A 4 man-made 27 24 27 26 25 26 27 building 38 31 38 31 33 32 30 build part 56 64 40 62 48 62 61 tool 44 56 40 62 44 46 50 furniture 36 47 50 47 40 44 45 animal 22 34 36 35 32 36 33 kitchen 16 17 19 12 13 12 11 vehicle 50 40 37 42 44 37 34 insect 38 34 38 34 42 33 36 vegetable 32 33 49 30 48 42 37 body part 58 30 48 33 50 28 32 clothing 44 52 39 51 44 45 47 1 LINGUISTIC+VISUAL 2 LINGUISTIC+AUDITORY 3 VISUAL+AUDITORY 4 LINGUISTIC+VISUAL+AUDITORY MOST ERROR", |
| "ref_id": "TABREF0" |
| }, |
| { |
| "start": 1289, |
| "end": 1296, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "fMRI Datasets", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "Categories Linguistic Visual Auditory L-V 1 L-A 2 V-A 3 L-V", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "fMRI Datasets", |
| "sec_num": "6.1.1" |
| }, |
| { |
| "text": "We obtain linguistic features from the GloVe (Pennington et al., 2014) , which is trained on Wikipedia 2014 and Gigaword 5. For visual features, We retrieve 300 to 1500 images for each concept noun from ImageNet, except human body word: 'hand', 'foot', 'arm', 'leg' and 'eye', which are not included in the ImageNet. Thus, we retrieve these images from Google Image (Afifi, 2017) . The retrieved images from ImageNet and Google are combined together as the image dataset for visual feature extraction.", |
| "cite_spans": [ |
| { |
| "start": 45, |
| "end": 70, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 366, |
| "end": 379, |
| "text": "(Afifi, 2017)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Sense Dataset", |
| "sec_num": "6.1.2" |
| }, |
| { |
| "text": "For auditory features, we use the Freesound dataset (Font et al., 2013) , which is a huge collaborative database of audio snippets, samples, recordings, and bleeps.", |
| "cite_spans": [ |
| { |
| "start": 52, |
| "end": 71, |
| "text": "(Font et al., 2013)", |
| "ref_id": "BIBREF11" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multi-Sense Dataset", |
| "sec_num": "6.1.2" |
| }, |
| { |
| "text": "In this paper, we use Small World of Words (SWW) (De Deyne et al., 2018) as the word association data source. SWW is a mental dictionary or lexicon in the major languages of the world. It collects associative words by inviting participants globally to play an online game of word associations 1 . The game is simple and easy to play: given a list of 18 cue words, participants are asked to give first three words that come to mind. It counts and demonstrates the human level word associations. For example, top ten forward associations of the cue word 'machine' are 'robot ', 'computer', 'engine', 'metal', 'gun', 'work', 'car', 'washing', 'factory', 'sewing' ; and top ten backward associations of it are 'slot ', 'fax', 'pinball', 'mechanism', 'sewing', 'washing', 'xerox', 'contraption', 'cog', 'copier' . Here, forward association refers to the words will come to mind when participants see the cue word 'machine'; and backward association refers to the word 'machine' will come to mind when participants view other cue words. And their rankings indicate the average order of the word that participants think of in the SWW game.", |
| "cite_spans": [ |
| { |
| "start": 573, |
| "end": 659, |
| "text": "', 'computer', 'engine', 'metal', 'gun', 'work', 'car', 'washing', 'factory', 'sewing'", |
| "ref_id": null |
| }, |
| { |
| "start": 712, |
| "end": 806, |
| "text": "', 'fax', 'pinball', 'mechanism', 'sewing', 'washing', 'xerox', 'contraption', 'cog', 'copier'", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Associative Word Dataset", |
| "sec_num": "6.1.3" |
| }, |
| { |
| "text": "In our paper, we use 60 concrete words from Mitchell et al. (2008) and choose 175 words from Pereira et al. (2018) (we discard 5 words: 'argumentatively', 'deliberately', 'emotionally', 'tried', 'willingly', which do not present in the associative words data source) as the cue words.", |
| "cite_spans": [ |
| { |
| "start": 44, |
| "end": 66, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Associative Word Dataset", |
| "sec_num": "6.1.3" |
| }, |
| { |
| "text": "As mentioned in Section Task: Predicting the fMRI Activation, the task is to predict the fMRI activations. Following Mitchell et al. (2008) , we train the encoder consisting of several estimators (500 for Mitchell et al. (2008) and 5000 for Pereira et al. (2018) ). Each estimator predicts an fMRI activation value of a specific position in the brain. The estimator is trained by ridge regression where the loss function is the linear least squares function and is regularized by the L 2 -norm (Eq. 1). The regularization strength \u03b1 is chosen by cross-validation.", |
| "cite_spans": [ |
| { |
| "start": 117, |
| "end": 139, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 205, |
| "end": 227, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 241, |
| "end": 262, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "We evaluate each encoder's performance by following the strategy of Mitchell et al. (2008) and Pereira et al. (2018) . For each possible pair of fMRI activation, we compute the cosine similarity between predicted and actual one. If the predicted fMRI activation is more similar to its actual one than the alternative, we deem the classification correct. For the data of Mitchell et al. (2008) , each encoder is trained on 58 words and tested on the 2 left out words. The training and testing procedure iterates 1770 times. For the data of Pereira et al. (2018) , each encoder is trained within a cross-validation procedure. In each fold, the parameters are learned from 165 word vectors, and predicted fMRI activation from the 10 left out words. The overall classification accuracy is the fraction of correct pairs. The match score S is calculated as: S(p1 = i1, p2 = i2) = cosine(p1, i1) + cosine(p2, i2).", |
| "cite_spans": [ |
| { |
| "start": 68, |
| "end": 90, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 95, |
| "end": 116, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| }, |
| { |
| "start": 370, |
| "end": 392, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 539, |
| "end": 560, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "(4)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "6.4 Results and Discussion", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Evaluation", |
| "sec_num": "6.3" |
| }, |
| { |
| "text": "The cross-validated prediction accuracies are presented in Table 3 . The expected accuracy of matching the left-out words and images is 0.5 if the model was randomly matching. All learned models predict unseen words significantly above the chance level.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 59, |
| "end": 66, |
| "text": "Table 3", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Uni-and Multi-Modal in fMRI Prediction", |
| "sec_num": "6.4.1" |
| }, |
| { |
| "text": "In terms of unimodal prediction, the VISUAL based model overall outperforms others, which verifies the picture superiority effect -the human brain is extremely sensitive to the symbolic modality of presentation. VISUAL and LINGUISTIC significantly outperform the AUDITORY based model, with the mean between-category accuracy dropping from approximately 0.8 to 0.68.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Uni-and Multi-Modal in fMRI Prediction", |
| "sec_num": "6.4.1" |
| }, |
| { |
| "text": "In terms of multimodal prediction, adding visual features improves performance as LINGUIS-TIC+VISUAL outperforms LINGUISTIC, VISUAL+AUDITORY outperforms AUDITORY and LINGUIS-TIC+VISUAL+AUDITORY outperforms LINGUISTIC+AUDITORY. These results provide a new proof for the interactive model of brain in behaviour measures which holds that structural and semantic information interact immediately during comprehension at any point in time, and weaken the serial model which proposes that semantic aspects only come into play at later stage and do not allow overlap with previous stages. We also notice that AUDITORY weakens model's prediction ability except for P 6 and P 7. Together with the finding in unimodal experiments that auditory based model performs less significantly than the linguistic and visual based model, the result suggests that visual properties contribute the most in conceptual representation in conceptual representations of nouns in the human brain, while acoustic properties contribute less. The results from P 6 and P 7 also suggest there are individual differences in the effects of different modality data on conceptual representations in the brain. Kiela and Clark (2015) indicate that multimodal representations enriched by auditory information perform well on relatedness and similarity on words that have auditory associations such as instruments. We explore if the fMRI activation can be predicted by sound features, which is generated by using the objects which do not have obvious acoustic properties such as hand, foot, etc. Although the prediction accuracy is lower when using auditory features than using linguistic and visual features, it is significantly above the chance level. The results suggest that acoustic properties play a less important role but are ubiquitous in cognitive processes. We may need to consider the sound factors in the conceptual representation in general. \nFigure 2 shows the individual mean SE\u00b1accuracy and mean SE\u00b1accuracy of within-category and between category. From Figure 2 , we can see that individual performances vary in prediction and also, the result of between category prediction is better than within category prediction. We assume that this is because the features are much different between a category but more similar within a category, which makes predictions within category more demanding. For example, for linguistic feature, 'dog' has a very similar context with 'cat', such as play, eat, but a very different context from 'machine', of which the context might be artificial, fix. Previous research has suggested that brain may rely on enhanced perceptual processing in order to compensate for inefficient higher level semantic processing, thus the phenomena of high within-category error rate and low between category error rate reflects the sensory compensation mechanism of brain in language processing. Mitchell et al. (2008) and Pereira et al. (2018) dataset. Table 4 shows the within category error, and we observe that Auditory features reduce the error of some categories, for example, for body part, VISUAL+AUDITORY outperforms simply VISUAL, and for building part, LINGUISTIC+AUDITORY outperforms simply LINGUISTIC. It reflects that the brain does trigger auditory senses during the rapid visual analysis and the activation of semantic knowledge, and also supports behavioural neuroscientists on that semantic processes can strongly affect generation of auditory imagery.", |
| "cite_spans": [ |
| { |
| "start": 1173, |
| "end": 1195, |
| "text": "Kiela and Clark (2015)", |
| "ref_id": "BIBREF19" |
| }, |
| { |
| "start": 2888, |
| "end": 2910, |
| "text": "Mitchell et al. (2008)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 2915, |
| "end": 2936, |
| "text": "Pereira et al. (2018)", |
| "ref_id": "BIBREF33" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1916, |
| "end": 1924, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 2030, |
| "end": 2038, |
| "text": "Figure 2", |
| "ref_id": null |
| }, |
| { |
| "start": 2946, |
| "end": 2953, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Uni-and Multi-Modal in fMRI Prediction", |
| "sec_num": "6.4.1" |
| }, |
| { |
| "text": "We choose the top 5 forward associate words and 5 backward words in our experiments. The concept of 'associate' and associative word dataset are introduced in section 6.1.3. For example, for the word 'invention', the associative words that people most likely to think of are 'new ', 'light bulb', 'idea', 'innovation', 'creation', 'patent', 'Edison', 'Einstein', 'science', 'scientist', 'clever', 'smart', 'creative', 'create', 'Genius' . We use the word 'invention', its associative words and their combinations to predict the fMRI activation separately. Table 5a and Table 5b are the prediction accuracy that we use stimuli and forward associative words as the input on both datasets. Tables 6a and 6b are the prediction accuracy that we use stimuli and backward associative words as input. s-random means using linear combination of stimuli and irrelevant word, which is randomly chosen. s-linear means using linear combination of stimuli and one correspondent associate word. It is important to note that, the irrelevant word is randomly chosen, and it is not associative to the stimuli. For example, for the stimuli 'invention', we may choose the word 'washing', which is not in the associative word pool of 'invention', as the irrelevant word. Figure 3 is the comparison of using various word association, where the original data is extracted from Table 5a, Table 5b, Table 6a and Table 6b .", |
| "cite_spans": [ |
| { |
| "start": 280, |
| "end": 436, |
| "text": "', 'light bulb', 'idea', 'innovation', 'creation', 'patent', 'Edison', 'Einstein', 'science', 'scientist', 'clever', 'smart', 'creative', 'create', 'Genius'", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 556, |
| "end": 564, |
| "text": "Table 5a", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 569, |
| "end": 577, |
| "text": "Table 5b", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1250, |
| "end": 1258, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 1354, |
| "end": 1382, |
| "text": "Table 5a, Table 5b, Table 6a", |
| "ref_id": "TABREF4" |
| }, |
| { |
| "start": 1387, |
| "end": 1395, |
| "text": "Table 6b", |
| "ref_id": "TABREF5" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Associated Concept in fMRI Prediction", |
| "sec_num": "6.4.2" |
| }, |
| { |
| "text": "Compared with (a), (b) in Figure 3 , the prediction accuracy in (c), (d) is the average of 175 words. Thus, the lines in (c), (d) are smoother. However, though the results in (a), (b) vary, they can still show the overall trend. Further, compared with using forward associative words (results from (a), (c)), using backward associative words has an equivalent performance, which means both forward and backward associative thinking can reflect the associative conceptual representation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 26, |
| "end": 34, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Associated Concept in fMRI Prediction", |
| "sec_num": "6.4.2" |
| }, |
| { |
| "text": "We observe that all models with associative conceptual features outperform above the chance level on both datasets. Compared with using only stimuli or associate word (bottom blue line in Figure 3 ), we also find that the model can better predict fMRI activation by using their linear combination (top yellow line in Figure 3 ). Particularly, by using stimuli and their associative words, the model has the best ability to predict fMRI activations (top yellow line in Figure 3 ). We also observe that after added the irrelevant word, the model's performance decreases. These results show that even though both associative words and irrelevant words are not directly associated with the stimuli words and are distinct from the stimuli x means using only the x \u2212 th ranked associative word, or using linear combination of stimuli word and x \u2212 th ranked associative word to predict the result. The rank tag of an associative word here means the average order of the word that participants think of in the SWW game.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 188, |
| "end": 196, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 317, |
| "end": 325, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| }, |
| { |
| "start": 468, |
| "end": 476, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Associated Concept in fMRI Prediction", |
| "sec_num": "6.4.2" |
| }, |
| { |
| "text": "words in distributed semantic representation in vector space, the associative words share some significant commonality with stimuli words in human conceptual representations while irrelevant words do not. It demonstrates that associative words serve as a complement to the stimuli words and accord with the brain activity, but the irrelevant words are noise to the conceptual representation. In addition, there is a clear trend that the prediction accuracy decreases as the associative word rank decreases (bottom blue line in Figure 3 ). This result suggests that, given a stimulus, the higher ranked associate word can better reflect associative thinking related to a concept, and the subsequent associative words are less related. In other words, the rank of associative words can reflect its weight of associative thinking in conceptual representations.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 527, |
| "end": 535, |
| "text": "Figure 3", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Associated Concept in fMRI Prediction", |
| "sec_num": "6.4.2" |
| }, |
| { |
| "text": "We explored conceptual representation in human brains by evaluating the effect of multimodal data in predicting fMRI activation, observing a clear advantage in predicting brain activation for visually grounded models. This finding consistent with the neurological evidence that the word comprehension first involves activation of shallow language-based conceptual representation , which is then complemented by deeper simulation of visual properties of the concept (Louwerse and Hutchinson, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 465, |
| "end": 496, |
| "text": "(Louwerse and Hutchinson, 2012)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "From the associative thinking perspective, we find that though the associative words might be far away in the distributed semantic vector space, we could still use them to better predict fMRI activation. We carried out more thorough and extensive work compare to the work of Bulat et al. (2017) . The findings also support the hypotheses that the linguistic, conceptual and perceptual systems interplay in the human brain (Barsalou, 2008) . The fMRI datasets used in our study are generated by presenting subjects with written words together with pictures. In other words, the fMRI representations are the participants' reactions to linguistic and visual input -but not acoustic. To further study human brain response representations to the acoustic stimuli, we plan to collect fMRI when presenting acoustic concepts.", |
| "cite_spans": [ |
| { |
| "start": 275, |
| "end": 294, |
| "text": "Bulat et al. (2017)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 422, |
| "end": 438, |
| "text": "(Barsalou, 2008)", |
| "ref_id": "BIBREF1" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion and Future Work", |
| "sec_num": "7" |
| }, |
| { |
| "text": "https://smallworldofwords.org/en", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "2017. 11k hands: Gender recognition and biometric identification using a large dataset of hand images", |
| "authors": [ |
| { |
| "first": "Mahmoud", |
| "middle": [], |
| "last": "Afifi", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mahmoud Afifi. 2017. 11k hands: Gender recognition and biometric identification using a large dataset of hand images.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Grounded cognition. Annual review of psychology", |
| "authors": [ |
| { |
| "first": "Lawrence", |
| "middle": [], |
| "last": "Barsalou", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "59", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lawrence Barsalou. 2008. Grounded cognition. Annual review of psychology, 59:617-45, 02.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Janvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "J. Mach. Learn. Res", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A neural probabilistic language model. J. Mach. Learn. Res., 3:1137-1155, March.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Multimodal distributional semantics", |
| "authors": [ |
| { |
| "first": "Elia", |
| "middle": [], |
| "last": "Bruni", |
| "suffix": "" |
| }, |
| { |
| "first": "Nam", |
| "middle": [ |
| "Khanh" |
| ], |
| "last": "Tran", |
| "suffix": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Baroni", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "J. Artif. Int. Res", |
| "volume": "49", |
| "issue": "1", |
| "pages": "1--47", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Elia Bruni, Nam Khanh Tran, and Marco Baroni. 2014. Multimodal distributional semantics. J. Artif. Int. Res., 49(1):1-47, January.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Concreteness ratings for 40 thousand generally known english word lemmas. Behavior research methods", |
| "authors": [ |
| { |
| "first": "Marc", |
| "middle": [], |
| "last": "Brysbaert", |
| "suffix": "" |
| }, |
| { |
| "first": "Amy", |
| "middle": [ |
| "Beth" |
| ], |
| "last": "Warriner", |
| "suffix": "" |
| }, |
| { |
| "first": "Victor", |
| "middle": [], |
| "last": "Kuperman", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "", |
| "volume": "46", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marc Brysbaert, Amy Beth Warriner, and Victor Kuperman. 2013. Concreteness ratings for 40 thousand generally known english word lemmas. Behavior research methods, 46, 10.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Speaking, seeing, understanding: Correlating semantic models with conceptual representation in the brain", |
| "authors": [ |
| { |
| "first": "Luana", |
| "middle": [], |
| "last": "Bulat", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| }, |
| { |
| "first": "Ekaterina", |
| "middle": [], |
| "last": "Shutova", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2017 Conference on EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1081--1091", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Luana Bulat, Stephen Clark, and Ekaterina Shutova. 2017. Speaking, seeing, understanding: Correlating semantic models with conceptual representation in the brain. In Proceedings of the 2017 Conference on EMNLP, pages 1081-1091, Copenhagen, Denmark, September. ACL.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "The \"small world of words\" english word association norms for over 12,000 cue words", |
| "authors": [ |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Simon De Deyne", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Navarro", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Behavior Research Methods", |
| "volume": "51", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Simon De Deyne, Danielle Navarro, Amy Perfors, Marc Brysbaert, and Gert Storms. 2018. The \"small world of words\" english word association norms for over 12,000 cue words. Behavior Research Methods, 51, 10.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Imagenet: A large-scale hierarchical image database", |
| "authors": [ |
| { |
| "first": "Jia", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Dong", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Li Jia Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Li", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fei-Fei", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jia Deng, Wei Dong, Richard Socher, Li jia Li, Kai Li, and Li Fei-fei. 2009. Imagenet: A large-scale hierarchical image database. In In CVPR.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Using fmri activation to conceptual stimuli to evaluate methods for extracting conceptual representations from corpora", |
| "authors": [ |
| { |
| "first": "Barry", |
| "middle": [], |
| "last": "Devereux", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Kelly", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Korhonen", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of First Workshop On Computational Neurolinguistics, NAACL HLT", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Barry Devereux, C Kelly, and A Korhonen. 2010. Using fmri activation to conceptual stimuli to evaluate methods for extracting conceptual representations from corpora. Proceedings of First Workshop On Computational Neurolinguistics, NAACL HLT, pages 70-78, 01.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Concept representation reflects multimodal abstraction: A framework for embodied semantics", |
| "authors": [ |
| { |
| "first": "Leonardo", |
| "middle": [], |
| "last": "Fernandino", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeffrey", |
| "middle": [ |
| "R" |
| ], |
| "last": "Binder", |
| "suffix": "" |
| }, |
| { |
| "first": "H", |
| "middle": [], |
| "last": "Rutvik", |
| "suffix": "" |
| }, |
| { |
| "first": "Suzanne", |
| "middle": [ |
| "L" |
| ], |
| "last": "Desai", |
| "suffix": "" |
| }, |
| { |
| "first": "Colin", |
| "middle": [ |
| "J" |
| ], |
| "last": "Pendl", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [ |
| "L" |
| ], |
| "last": "Humphries", |
| "suffix": "" |
| }, |
| { |
| "first": "Lisa", |
| "middle": [ |
| "L" |
| ], |
| "last": "Gross", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "S" |
| ], |
| "last": "Conant", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Seidenberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Cerebral cortex", |
| "volume": "26", |
| "issue": "", |
| "pages": "2018--2052", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Leonardo Fernandino, Jeffrey R. Binder, Rutvik H. Desai, Suzanne L Pendl, Colin J Humphries, William L. Gross, Lisa L. Conant, and Mark S. Seidenberg. 2016. Concept representation reflects multimodal abstraction: A framework for embodied semantics. Cerebral cortex, 26 5:2018-34.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "A synopsis of linguistic theory 1930-1955. Special Volume of the Philological Society", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [], |
| "last": "Rupert", |
| "suffix": "" |
| }, |
| { |
| "first": "Firth", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 1957, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "John Rupert Firth. 1957. A synopsis of linguistic theory 1930-1955. Special Volume of the Philological Society., page 11.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Freesound technical demo", |
| "authors": [ |
| { |
| "first": "Frederic", |
| "middle": [], |
| "last": "Font", |
| "suffix": "" |
| }, |
| { |
| "first": "Gerard", |
| "middle": [], |
| "last": "Roma", |
| "suffix": "" |
| }, |
| { |
| "first": "Xavier", |
| "middle": [], |
| "last": "Serra", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "ACM International Conference on Multimedia (MM'13)", |
| "volume": "", |
| "issue": "", |
| "pages": "411--412", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frederic Font, Gerard Roma, and Xavier Serra. 2013. Freesound technical demo. In ACM International Confer- ence on Multimedia (MM'13), pages 411-412, Barcelona, Spain, 21/10/2013. ACM, ACM.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Content-based retrieval of music and audio", |
| "authors": [ |
| { |
| "first": "Jonathan", |
| "middle": [ |
| "T" |
| ], |
| "last": "Foote", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "MULTIMEDIA STORAGE AND ARCHIVING SYSTEMS II", |
| "volume": "", |
| "issue": "", |
| "pages": "138--147", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jonathan T. Foote. 1997. Content-based retrieval of music and audio. In MULTIMEDIA STORAGE AND ARCHIVING SYSTEMS II, PROC. OF SPIE, pages 138-147.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Distributional structure. Word", |
| "authors": [ |
| { |
| "first": "Zellig", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| } |
| ], |
| "year": 1954, |
| "venue": "", |
| "volume": "10", |
| "issue": "", |
| "pages": "146--162", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zellig Harris. 1954. Distributional structure. Word, 10(23):146-162.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Deep residual learning for image recognition", |
| "authors": [ |
| { |
| "first": "Kaiming", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiangyu", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaoqing", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Sun", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE Conference on CVPR", |
| "volume": "", |
| "issue": "", |
| "pages": "770--778", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kaiming He, Xiangyu Zhang, Shaoqing Ren, and Jian Sun. 2016. Deep residual learning for image recognition. 2016 IEEE Conference on CVPR, pages 770-778.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Distributed representation of objects in the human ventral visual pathway", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Ishai", |
| "suffix": "" |
| }, |
| { |
| "first": "L", |
| "middle": [ |
| "G" |
| ], |
| "last": "Ungerleider", |
| "suffix": "" |
| }, |
| { |
| "first": "A", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "L" |
| ], |
| "last": "Schouten", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "V" |
| ], |
| "last": "Haxby", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "Proc Natl Acad Sci U S A", |
| "volume": "96", |
| "issue": "16", |
| "pages": "9379--9384", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "A. Ishai, L. G. Ungerleider, A. Martin, J. L. Schouten, and J. V. Haxby. 1999. Distributed representation of objects in the human ventral visual pathway. Proc Natl Acad Sci U S A, 96(16):9379-9384, Aug. 10430951[pmid].", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Wordnet based features for predicting brain activity associated with meanings of nouns", |
| "authors": [ |
| { |
| "first": "Mehrdad", |
| "middle": [], |
| "last": "Ahmad Babaeian Jelodar", |
| "suffix": "" |
| }, |
| { |
| "first": "Shahram", |
| "middle": [], |
| "last": "Alizadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Khadivi", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "Proceedings of the NAACL HLT 2010 First Workshop on Computational Neurolinguistics, CN '10", |
| "volume": "", |
| "issue": "", |
| "pages": "18--26", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ahmad Babaeian Jelodar, Mehrdad Alizadeh, and Shahram Khadivi. 2010. Wordnet based features for predicting brain activity associated with meanings of nouns. In Proceedings of the NAACL HLT 2010 First Workshop on Computational Neurolinguistics, CN '10, pages 18-26, Stroudsburg, PA, USA. ACL.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "A neurosemantic theory of concrete noun representation based on the underlying brain codes", |
| "authors": [ |
| { |
| "first": "Marcel", |
| "middle": [ |
| "Adam" |
| ], |
| "last": "Just", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [ |
| "L" |
| ], |
| "last": "Cherkassky", |
| "suffix": "" |
| }, |
| { |
| "first": "Sandesh", |
| "middle": [], |
| "last": "Aryal", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "PLoS ONE", |
| "volume": "5", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Marcel Adam Just, Vladimir L. Cherkassky, Sandesh Aryal, and Tom M. Mitchell. 2010. A neurosemantic theory of concrete noun representation based on the underlying brain codes. PLoS ONE, 5(1):e8622, jan.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Learning image embeddings using convolutional neural networks for improved multi-modal semantics", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "L\u00e9on", |
| "middle": [], |
| "last": "Bottou", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of the 2014 Conference on EMNLP (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "36--45", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela and L\u00e9on Bottou. 2014. Learning image embeddings using convolutional neural networks for improved multi-modal semantics. In Proceedings of the 2014 Conference on EMNLP (EMNLP), pages 36-45, Doha, Qatar, October. ACL.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Multi-and cross-modal semantics beyond vision: Grounding in auditory perception", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| }, |
| { |
| "first": "Stephen", |
| "middle": [], |
| "last": "Clark", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "2461--2470", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela and Stephen Clark. 2015. Multi-and cross-modal semantics beyond vision: Grounding in auditory perception. In Proceedings of the 2015 Conference on EMNLP, pages 2461-2470, Lisbon, Portugal, September. ACL.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "MMFeat: A toolkit for extracting multi-modal features", |
| "authors": [ |
| { |
| "first": "Douwe", |
| "middle": [], |
| "last": "Kiela", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of ACL-2016 System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "55--60", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douwe Kiela. 2016. MMFeat: A toolkit for extracting multi-modal features. In Proceedings of ACL-2016 System Demonstrations, pages 55-60, Berlin, Germany, August. ACL.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Neurological evidence linguistic processes precede perceptual simulation in conceptual processing", |
| "authors": [ |
| { |
| "first": "Max", |
| "middle": [], |
| "last": "Louwerse", |
| "suffix": "" |
| }, |
| { |
| "first": "Sterling", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Frontiers in Psychology", |
| "volume": "3", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Max Louwerse and Sterling Hutchinson. 2012. Neurological evidence linguistic processes precede perceptual simulation in conceptual processing. Frontiers in Psychology, 3, 10.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A tutorial on spectral clustering", |
| "authors": [ |
| { |
| "first": "Ulrike", |
| "middle": [], |
| "last": "Luxburg", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Statistics and Computing", |
| "volume": "17", |
| "issue": "4", |
| "pages": "395--416", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ulrike Luxburg. 2007. A tutorial on spectral clustering. Statistics and Computing, 17(4):395-416, December.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "The representation of object concepts in the brain", |
| "authors": [ |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Martin", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Annual Review of Psychology", |
| "volume": "58", |
| "issue": "1", |
| "pages": "25--45", |
| "other_ids": { |
| "PMID": [ |
| "16968210" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Alex Martin. 2007. The representation of object concepts in the brain. Annual Review of Psychology, 58(1):25-45. PMID: 16968210.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Wordnet: A lexical database for english", |
| "authors": [ |
| { |
| "first": "A", |
| "middle": [], |
| "last": "George", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Miller", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "COMMUNICATIONS OF THE ACM", |
| "volume": "38", |
| "issue": "", |
| "pages": "39--41", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "George A. Miller. 1995. Wordnet: A lexical database for english. COMMUNICATIONS OF THE ACM, 38:39-41.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Learning to decode cognitive states from brain images", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Rebecca", |
| "middle": [], |
| "last": "Hutchinson", |
| "suffix": "" |
| }, |
| { |
| "first": "Radu", |
| "middle": [ |
| "S" |
| ], |
| "last": "Niculescu", |
| "suffix": "" |
| }, |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Xuerui", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcel", |
| "middle": [], |
| "last": "Just", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharlene", |
| "middle": [], |
| "last": "Newman", |
| "suffix": "" |
| } |
| ], |
| "year": 2004, |
| "venue": "Mach. Learn", |
| "volume": "57", |
| "issue": "1-2", |
| "pages": "145--175", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom M. Mitchell, Rebecca Hutchinson, Radu S. Niculescu, Francisco Pereira, Xuerui Wang, Marcel Just, and Sharlene Newman. 2004. Learning to decode cognitive states from brain images. Mach. Learn., 57(1-2):145- 175, October.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Predicting human brain activity associated with the meanings of nouns", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Svetlana", |
| "middle": [ |
| "V" |
| ], |
| "last": "Shinkareva", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Carlson", |
| "suffix": "" |
| }, |
| { |
| "first": "Kai-Min", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Vicente", |
| "middle": [ |
| "L" |
| ], |
| "last": "Malave", |
| "suffix": "" |
| }, |
| { |
| "first": "Robert", |
| "middle": [ |
| "A" |
| ], |
| "last": "Mason", |
| "suffix": "" |
| }, |
| { |
| "first": "Marcel", |
| "middle": [ |
| "Adam" |
| ], |
| "last": "Just", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Science", |
| "volume": "320", |
| "issue": "5880", |
| "pages": "1191--1195", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom M. Mitchell, Svetlana V. Shinkareva, Andrew Carlson, Kai-Min Chang, Vicente L. Malave, Robert A. Mason, and Marcel Adam Just. 2008. Predicting human brain activity associated with the meanings of nouns. Science, 320(5880):1191-1195.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Selecting corpus-semantic models for neurolinguistic decoding", |
| "authors": [ |
| { |
| "first": "Brian", |
| "middle": [], |
| "last": "Murphy", |
| "suffix": "" |
| }, |
| { |
| "first": "Partha", |
| "middle": [], |
| "last": "Talukdar", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Mitchell", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "*SEM 2012: The First Joint Conference on Lexical and Computational Semantics", |
| "volume": "1", |
| "issue": "", |
| "pages": "114--123", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Brian Murphy, Partha Talukdar, and Tom Mitchell. 2012. Selecting corpus-semantic models for neurolinguistic decoding. In *SEM 2012: The First Joint Conference on Lexical and Computational Semantics -Volume 1: Proceedings of the main conference and the shared task, and Volume 2: Proceedings of the Sixth International Workshop on Semantic Evaluation (SemEval 2012), pages 114-123, Montr\u00e9al, Canada, 7-8 June. ACL.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Speech communication: human and machine. Addison-Wesley series in electrical engineering: digital signal processing", |
| "authors": [ |
| { |
| "first": "D", |
| "middle": [], |
| "last": "O'Shaughnessy", |
| "suffix": "" |
| } |
| ], |
| "year": 1987, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "D. O'Shaughnessy. 1987. Speech communication: human and machine. Addison-Wesley series in electrical engineering: digital signal processing. Universities Press (India) Pvt. Limited.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "EMNLP (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christopher D. Manning. 2014. Glove: Global vectors for word repre- sentation. In EMNLP (EMNLP), pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "Machine learning classifiers and fmri: A tutorial overview", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Tom", |
| "middle": [ |
| "M" |
| ], |
| "last": "Mitchell", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Botvinick", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "NeuroImage", |
| "volume": "45", |
| "issue": "1", |
| "pages": "199--209", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Pereira, Tom M. Mitchell, and Matthew Botvinick. 2009. Machine learning classifiers and fmri: A tutorial overview. NeuroImage, 45 1 Suppl:S199-209.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Generating text from functional brain images", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Detre", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Botvinick", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Frontiers in Human Neuroscience", |
| "volume": "5", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Pereira, Greg Detre, and Matthew Botvinick. 2011. Generating text from functional brain images. Frontiers in Human Neuroscience, 5:72.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Using wikipedia to learn semantic feature representations of concrete concepts in neuroimaging experiments", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Botvinick", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Detre", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "Artif. Intell", |
| "volume": "194", |
| "issue": "", |
| "pages": "240--252", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Pereira, Matthew Botvinick, and Greg Detre. 2013. Using wikipedia to learn semantic feature represen- tations of concrete concepts in neuroimaging experiments. Artif. Intell., 194:240-252, January.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Toward a universal decoder of linguistic meaning from brain activation", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Pereira", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Lou", |
| "suffix": "" |
| }, |
| { |
| "first": "Brianna", |
| "middle": [], |
| "last": "Pritchett", |
| "suffix": "" |
| }, |
| { |
| "first": "Samuel", |
| "middle": [], |
| "last": "Ritter", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Samuel", |
| "suffix": "" |
| }, |
| { |
| "first": "Nancy", |
| "middle": [], |
| "last": "Gershman", |
| "suffix": "" |
| }, |
| { |
| "first": "Matthew", |
| "middle": [], |
| "last": "Kanwisher", |
| "suffix": "" |
| }, |
| { |
| "first": "Evelina", |
| "middle": [], |
| "last": "Botvinick", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Fedorenko", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Nature communications", |
| "volume": "9", |
| "issue": "1", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Pereira, Bin Lou, Brianna Pritchett, Samuel Ritter, Samuel J Gershman, Nancy Kanwisher, Matthew Botvinick, and Evelina Fedorenko. 2018. Toward a universal decoder of linguistic meaning from brain activa- tion. Nature communications, 9(1):963.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Compute multimodal embeddings." |
| }, |
| "FIGREF1": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Mean accuracy on Pereira et al. (2018) dataset." |
| }, |
| "FIGREF3": { |
| "uris": null, |
| "num": null, |
| "type_str": "figure", |
| "text": "Comparison of various word association features. The top yellow line is corresponding to the results of s-linear, the below blue line is the result of s-random. For the point (x, y) in bottom blue line or top yellow line," |
| }, |
| "TABREF0": { |
| "content": "<table><tr><td colspan=\"2\">Categories Words</td></tr><tr><td>animal</td><td>bear, cat, dog, horse, cow</td></tr><tr><td>vegetable</td><td>lettuce, carrot, corn, tomato, celery</td></tr><tr><td>body part</td><td>eye, arm, foot, leg, hand</td></tr><tr><td colspan=\"2\">man-made telephone, key, bell, watch, refrigerator</td></tr><tr><td>building</td><td>igloo, barn, house, apartment, church</td></tr><tr><td>kitchen</td><td>spoon, bottle, cup, knife, glass</td></tr><tr><td>vehicle</td><td>truck, car, train, bicycle, airplane</td></tr><tr><td>clothing</td><td>dress, skirt, coat, pants, shirt</td></tr><tr><td>furniture</td><td>chair, dresser, desk, bed, table</td></tr><tr><td>build part</td><td>door, chimney, closet, arch, window</td></tr><tr><td>insect</td><td>fly, bee, butterfly, ant, beetle</td></tr><tr><td>tool</td><td>hammer, chisel, screwdriver, saw, pliers</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "fMRI datasets for language-brain encoding.", |
| "type_str": "table" |
| }, |
| "TABREF1": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "text": "", |
| "type_str": "table" |
| }, |
| "TABREF3": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "text": "Accuracies of within and between-category examples for all participants (P i). Within-category refers to stimuli coming from the same category (e.g. bear and cat come from the category of the animal) whereas between-category refers to stimuli coming from different categories.", |
| "type_str": "table" |
| }, |
| "TABREF4": { |
| "content": "<table><tr><td/><td>Stimuli</td><td>1</td><td>Backward Association Word 2 3 4</td><td>5</td><td/><td>Stimuli</td><td>1</td><td>Backward Association Word 2 3 4</td><td>5</td></tr><tr><td>s-random s-linear</td><td>0.80</td><td colspan=\"3\">0.74 0.74 0.74 0.74 0.74 0.77 0.78 0.80 0.79 0.78</td><td>s-random s-linear</td><td>0.73</td><td colspan=\"2\">0.68 0.68 0.69 0.69 0.68 0.71 0.71 0.71 0.71 0.71</td></tr><tr><td colspan=\"4\">(a) Mean accuracy on Mitchell et al. (2008) dataset.</td><td/><td colspan=\"4\">(b) Mean accuracy on Pereira et al. (2018) dataset.</td></tr></table>", |
| "html": null, |
| "num": null, |
| "text": "Mean FORWARD fMRI activation prediction accuracy onMitchell et al. (2008) andPereira et al. (2018) dataset.", |
| "type_str": "table" |
| }, |
| "TABREF5": { |
| "content": "<table/>", |
| "html": null, |
| "num": null, |
| "text": "Mean BACKWARD fMRI activation prediction accuracy on", |
| "type_str": "table" |
| } |
| } |
| } |
| } |