| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:24:13.453541Z" |
| }, |
| "title": "Topic Embedding Regression Model and its Application to Financial Texts", |
| "authors": [ |
| { |
| "first": "Weiran", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Hiroshima University", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Koji", |
| "middle": [], |
| "last": "Eguchi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Hiroshima University", |
| "location": {} |
| }, |
| "email": "kxeguchi@hiroshima-u.ac.jp" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "In this paper, we aim to predict stock price return rates by analyzing text data in financial news articles. A promising text analysis technique is word embedding that maps words into a lowdimensional continuous embedding space by exploiting the local word collocation patterns in a small context window. Another means of analyzing text is topic modeling that maps each document into a low-dimensional topic space. Recently developed topic embedding takes advantage of those two approaches by modeling latent topics of each document in a word embedding space. In this paper, by incorporating regression into the topic embedding model, we propose a topic embedding regression model called TopicVec-Reg to jointly model each document and a response variable associated with the document. Moreover, our method predicts the stock price return rate for unseen unlabeled financial articles. We evaluated the effectiveness of TopicVec-Reg through experiments in the task of stock return rate prediction using news articles provided by Thomson Reuters and stock prices by the Tokyo Stock Exchange. The result of closed test experiments showed that our method brought meaningful improvement on prediction performance in comparison to performing linear regression as post-processing of TopicVec. Through an open test, our method showed better prediction accuracy with a statistically significant difference.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "In this paper, we aim to predict stock price return rates by analyzing text data in financial news articles. A promising text analysis technique is word embedding that maps words into a lowdimensional continuous embedding space by exploiting the local word collocation patterns in a small context window. Another means of analyzing text is topic modeling that maps each document into a low-dimensional topic space. Recently developed topic embedding takes advantage of those two approaches by modeling latent topics of each document in a word embedding space. In this paper, by incorporating regression into the topic embedding model, we propose a topic embedding regression model called TopicVec-Reg to jointly model each document and a response variable associated with the document. Moreover, our method predicts the stock price return rate for unseen unlabeled financial articles. We evaluated the effectiveness of TopicVec-Reg through experiments in the task of stock return rate prediction using news articles provided by Thomson Reuters and stock prices by the Tokyo Stock Exchange. The result of closed test experiments showed that our method brought meaningful improvement on prediction performance in comparison to performing linear regression as post-processing of TopicVec. Through an open test, our method showed better prediction accuracy with a statistically significant difference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "In financial markets, people generally make decisions about where to invest after taking into account economic indicators, news about companies, and events in the world. However, with the development of information technology, a large amount of text data is being transmitted every day, and it is practically difficult to keep track of all the information in the domain of interest. Therefore, as a means to support people's decision-making, some research has been conducted to predict financial indicators such as stock prices from a large amount of text data using machine learning techniques. In this paper, we analyze financial news articles that reflect business sentiment and corporate activities in particular and tackle the problem of predicting stock prices related to them.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "As a tool for analyzing large amounts of text data, it is common to use topic models. The topic models are statistical machine learning models that discover semantic structures hidden in a collection of documents and have been applied in various fields. A representative topic model is Latent Dirichlet Allocation (LDA) [Blei et al., 2003] . LDA assumes a latent variable that indicates the topic behind each word in a document and analyzes what topics the document is composed of by estimating the latent variable. If the size of the corpus is large enough, the co-occurrence patterns of words reflect the semantic relatedness between words, and thus appropriate topics can be found. By the way, word expressions need to be replaced in advance from the original text format to a numerical format that can be handled by machine learning. In the case of LDA, we first prepare a vocabulary, assign IDs to all word types, and represent each word in a \"one-hot\" representation. However, the \"one-hot\" representations suffer from problems such as high dimensionality and sparsity. Dieng et al. [2020] reported that as the size of the corpus increases, the quality of LDA's topics decreases. One way to eschew the problem of \"one-hot\" representation is word embedding. Word embedding uses local co-occurrence patterns of words to learn vector representations that take into account the relevance between words. In other words, the method embeds a vocabulary of more than tens of thousands in a lowdimensional vector space. Word embedding has the property that words with similar meanings are closely mapped in the vector space.", |
| "cite_spans": [ |
| { |
| "start": 320, |
| "end": 339, |
| "text": "[Blei et al., 2003]", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 1089, |
| "end": 1095, |
| "text": "[2020]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In this paper, we focus on a topic embedding that gives a smooth combination of the topic model and the word embedding. Topic embedding discovers the semantic structure of each document with the words represented by word embeddings. Also, latent variables related to the topics are represented in the word embedding space. Based on this idea, a topic embedding model TopicVec [Li et al., 2016] was proposed by adding topics to a generative word embedding model PSDVec [Li et al., 2015] . In TopicVec, similar to LDA, the topic distributions are assumed to be regularized with Dirichlet priors. Also each word in a document is assumed to be drawn from a link function that takes local context and global topics into account. Topic embedding has been reported to outperform LDA in topic quality, handling OOV, and tasks such as document classification.", |
| "cite_spans": [ |
| { |
| "start": 376, |
| "end": 393, |
| "text": "[Li et al., 2016]", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 468, |
| "end": 485, |
| "text": "[Li et al., 2015]", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To the best of our knowledge, there is no research on using topic embedding models such as TopicVec for regression problems, or on using them to predict stock prices from text data of financial articles. In this paper, we propose TopicVec-Reg as a topic embedding model with a regression function to model the relationship between each document and a response variable associated with it. The parameters including the regression coefficients and latent variables of TopicVec-Reg can be learned simultaneously using a variational Bayesian inference method.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We evaluated the effectiveness of TopicVec-Reg in comparison to performing linear regression as post-processing of TopicVec through experiments in the task of stock return rate prediction using news articles provided by Thomson Reuters and stock prices by the Tokyo Stock Exchange. The result showed that our model brought meaningful improvement on prediction performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Das et al. [2015] proposed GaussianLDA, which uses pretrained word embeddings and assumes that words in a topic are drawn from a multivariate Gaussian distribution with the topic embedding as the expectation.", |
| "cite_spans": [ |
| { |
| "start": 11, |
| "end": 17, |
| "text": "[2015]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "More recently, Dieng et al. [2020] proposed embedded topic model (ETM), which assumes that words are generated from a categorical distribution whose parameter is the inner product of the word embeddings and the embedding of the assigned topic.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 34, |
| "text": "[2020]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "As another work on topic embedding, Li et al. [2016] proposed TopicVec based on a generative word embedding model called PSDVec [Li et al., 2015] . PSDVec assumes that the conditional distribution of each word given its context can be factorized approximately into independent logbilinear terms. In TopicVec, the conditional distribution of each word is influenced by not only its context but also the topic assigned to it. Our proposed model is positioned as an extension of PSDVec and TopicVec, which will be further described in the next section. Blei et al. [2010] proposed a supervised topic model, Supervised Latent Dirichlet Allocation (sLDA), as a way to predict the response variable associated with each document. The response variable is assumed to be generated from a generalized linear model, such as linear regression with the expectation of the latent variables of the topics assigned to the document. By modeling the document and the response variable simultaneously, it is expected to be able to estimate the latent topics that can predict the response variable for a newly given document. However, there is a similar problem with LDA in \"one-hot\" representations, as mentioned in the previous section.", |
| "cite_spans": [ |
| { |
| "start": 36, |
| "end": 52, |
| "text": "Li et al. [2016]", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 128, |
| "end": 145, |
| "text": "[Li et al., 2015]", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 550, |
| "end": 568, |
| "text": "Blei et al. [2010]", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Thus, in this paper, by incorporating a linear regression model into the topic embedding model TopicVec, we develop a topic embedding regression model TopicVec-Reg.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "First of all, Table 1 lists the notations used in this paper.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 15, |
| "end": 22, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Background", |
| "sec_num": "3" |
| }, |
| { |
| "text": "S Vocabulary{s 1 , \u2022 \u2022 \u2022 , s W } V Embedding matrix(v s1 , \u2022 \u2022 \u2022 , v s W ) D Document set{d 1 , \u2022 \u2022 \u2022 , d M } v si Embedding of word s i a sisj , A Bigram residuals t k , T Topic embeddings r k , r Topic residuals z ij", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name Description", |
| "sec_num": null |
| }, |
| { |
| "text": "Topic assignment of the j-th word in doc ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name Description", |
| "sec_num": null |
| }, |
| { |
| "text": "d i \u03c6 i Mixing proportions of topics in doc d i y response variables{y i , \u2022 \u2022 \u2022 , y M } \u03b7 regression coefficients{\u03b7 1 , \u2022 \u2022 \u2022 , \u03b7 K }", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Name Description", |
| "sec_num": null |
| }, |
| { |
| "text": "PSDVec (Positive-Semidefinite Vectors) [Li et al., 2015 ] is a generative word embedding method and the precursor of Top-icVec. In PSDVec, given its context words, the conditional distribution of the focus word is defined by the following link function:", |
| "cite_spans": [ |
| { |
| "start": 39, |
| "end": 55, |
| "text": "[Li et al., 2015", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "P (w c |w 0 : w c\u22121 ) \u2248P (w c ) exp v wc c\u22121 l=0 v w l + c\u22121 l=0 a w l wc .", |
| "eq_num": "(1)" |
| } |
| ], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Here the focus word w c is assumed to be generated depending on the context words of size c. v T wc v w l captures linear correlations of two words and a wcw l is the bigram residual that captures the non-linear part.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "Given the hyperparameter", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "\u00b5 = (\u00b5 1 , \u2022 \u2022 \u2022 , \u00b5 W )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "and a weight function on the bigram probability f (h ij ), the generative process for the corpus is as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "1. For each word", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "s i , draw the embedding v si from N (0, 1 2\u00b5i I); 2. For each bigram (s i , s j ), draw a sisj from N 0, 1", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "2f (hij ) ; 3. For each document d i , draw w ij from vocabulary S according to the probability defined by (1). We omit the derivation process here. The derived optimization objective is to fit PMI(s i , s j ) = log P (si,sj ) P (si)P (sj ) using v sj v si and it is approached by a Block Coordinate Descent algorithm.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Word Embedding Model: PSDVec", |
| "sec_num": "3.1" |
| }, |
| { |
| "text": "The conditional distribution of the focus word in TopicVec [Li et al., 2016] is defined by the following function:", |
| "cite_spans": [ |
| { |
| "start": 59, |
| "end": 76, |
| "text": "[Li et al., 2016]", |
| "ref_id": "BIBREF2" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (w c |w 0 : w c\u22121 , z c , d i ) \u2248P (w c ) exp v wc c\u22121 l=0 v w l + t zc + c\u22121 l=0 a w l wc + r zc .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(2) Here t zc is the embedding of the topic assigned to the focus word and it can be treated as one of the context words. r zc is the residual about the topic z c . With the link function, the relevance between the word and the topic is encoded by the cosine distance in the embedding space. The generative process of TopicVec is as follows: 1. For each topic k, draw a topic embedding uniformly from a hyper ball of radius \u03b3, i.e. t k \u223c Unif(B \u03b3 ); 2. For each document d i :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "(a) Draw the mixing proportions \u03c6 i from the Dirichlet prior Dir(\u03b1); (b) For the j-th word:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "i. Draw topic assignment z ij from the categorical distribution Cat(\u03c6 i ); ii. Draw word w ij from vocabulary S according to", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "P (w ij |w i,j\u2212c : w i,j\u22121 , z ij , d i ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The graphical model in Figure 1 presents the generative process above.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 23, |
| "end": 31, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "The complete data loglikelihood of the whole corpus: the full joint log-probability of the corpus D, word embeddings V , bigram residuals A, topic embeddings T , topic assignments Z, and topic distributions \u03c6 can be written as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "log p(D, A, V , Z, T , \u03c6|\u03b1, \u03b3, \u00b5) =C 0 \u2212 log Z(H, \u00b5) \u2212 A 2 f (H) \u2212 W i=1 \u00b5 i v si 2 + M i=1 K k=1 log \u03c6 ik (m ik + \u03b1 k \u2212 1) + Li j=1 r zij + v wij \uf8eb \uf8ed j\u22121 l=j\u2212c v w il + t zij \uf8f6 \uf8f8 + j\u22121 l=j\u2212c a w il wij ,", |
| "eq_num": "(3)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "where m ik = Li j=1 \u03b4(z ij = k) indicates that the number of words assigned to topic k. C 0 is constant given the hyperparameters.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Given the hyperparameters \u03b1, \u03b3, and \u00b5, the optimal V , T , and p(Z, \u03c6|D, A, V , T ) are estimated to maximize the loglikelihood as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Step1 V and A are obtained according to the original PSD-Vec; Step2 Given V and A, the loglikelihood function is used to find the optimal T and p(Z, \u03c6|D, A, V , T ). Since the posterior p(Z, \u03c6|D, T ) is analytically intractable, the posterior is approximated by the variational distribution q(Z, \u03c6; \u03c0, \u03b8) = q(\u03c6; \u03b8)q(Z; \u03c0). Here, the KL divergence is introduced and the estimation task is replaced with the problem of maximizing the variational lower bound L(q, T ):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "KL(q p) = log p(D|T ) \u2212 (E q [log p(D, Z, \u03c6|T )] + H(q)) = log p(D|T ) \u2212 L(q, T )", |
| "eq_num": "(4)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Here, H(q) is the entropy of q. The variational lower bound L(q, T ) is as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L(q, T ) = M i=1 \uf8f1 \uf8f2 \uf8f3 K k=1 \uf8eb \uf8ed K j=1 \u03c0 k ij + \u03b1 k \u2212 1 \uf8f6 \uf8f8 (\u03c8 (\u03b8 ik ) \u2212 \u03c8 (\u03b8 i0 )) + Tr \uf8eb \uf8ed T Li j=1 v wij \u03c0 ij \uf8f6 \uf8f8 + r Li j=1 \u03c0 ij + H(q) + C 1 .", |
| "eq_num": "(5)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Here the generalized EM algorithm is used to find the optimal q * and T * that maximize L(q, T ):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E-Step \u03c0 k ij \u221d exp (\u03b8 ik ) + v wij t k + r k ,", |
| "eq_num": "(6)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b8 ik = Li j=1 \u03c0 k ij + \u03b1 k ;", |
| "eq_num": "(7)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "M-", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Step", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "T new = T + \u03bb l, M i=1 L i \u2202L (q, T ) \u2202T ,", |
| "eq_num": "(8)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "r = \u2212 log(u exp{V T }).", |
| "eq_num": "(9)" |
| } |
| ], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "Here ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": ", \u03bb(l, M i=1 L i ) = L0\u03bb0 l\u2022max{ M", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Topic Embedding Model: TopicVec", |
| "sec_num": "3.2" |
| }, |
| { |
| "text": "TopicVec-Reg assumes that the document d i and the response variable y i associated with the document are generated following the generative process, as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Process", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "1. Generate each document d i according to the generative process of TopicVec;", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Process", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "2. Draw response variable", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Process", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "y i \u223c N (\u03b7 Z i , \u03b4 2 ).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Generative Process", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Here, the mean of the Gaussian distribution is the inner product of the regression coefficients \u03b7 and the expectation of latent topic assignmentsZ i . Figure 2 presents a graphical model of TopicVec-Reg.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 151, |
| "end": 159, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Generative Process", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Similar to TopicVec, we estimate the parameters using the generalized EM algorithm after obtaining the loglikelihood function. First, we rewrite the complete data loglikelihood in (3) to include response variables y = {y i } as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "log p(D, A, V , Z, T , \u03c6, y|\u03b1, \u03b3, \u00b5, \u03b7, \u03b4 2 ) =C 0 \u2212 log Z(H, \u00b5) \u2212 A 2 f (H) \u2212 W i=1 \u00b5 i v si 2 + M i=1 K k=1 log \u03c6 ik (m ik + \u03b1 k \u2212 1) \u2212 1 2 log(2\u03c0\u03b4 2 ) \u2212 1 2\u03b4 2 (y 2 i \u2212 2y i \u03b7 Z i + \u03b7 Z iZ i \u03b7) + Li j=1 v wij \uf8eb \uf8ed j\u22121 l=j\u2212c v w il + t zij \uf8f6 \uf8f8 + j\u22121 l=j\u2212c a w il wij + r zij .", |
| "eq_num": "(10)" |
| } |
| ], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Then by introducing a variational distribution q(Z, \u03c6; \u03c0, \u03b8) = q(\u03c6; \u03b8)q(Z; \u03c0) as in TopicVec, the expectation of the variational distribution of the loglikelihood of the response variable y i is obtained by:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "E q log p y i |Z i , \u03b7, \u03b4 2 = \u2212 1 2 log 2\u03c0\u03b4 2 \u2212 1 2\u03b4 2 y 2 i \u2212 2y i \u03b7 E q [Z i ] + \u03b7 E q Z iZ i \u03b7 ,", |
| "eq_num": "(11)" |
| } |
| ], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "E q [Z i ] =\u03c0 i = 1 L i Li j=1 \u03c0 ij , E q Z iZ i = 1 L i 2 \uf8eb \uf8ed Li j=1 m =j \u03c0 ij \u03c0 im + Li j=1 diag {\u03c0 ij } \uf8f6 \uf8f8 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Thus, the objective L reg (q, T ) is obtained by adding (11) to (5):", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "L reg (q, T ) = M i=1 \uf8f1 \uf8f2 \uf8f3 K k=1 \uf8eb \uf8ed Li j=1 \u03c0 k ij + \u03b1 k \u2212 1 \uf8f6 \uf8f8 (\u03c8 (\u03b8 ik ) \u2212 (\u03b8 i0 )) + \u2212 1 2 log(2\u03c0\u03b4 2 ) \u2212 y 2 i 2\u03b4 2 + Tr \uf8eb \uf8ed T Li j=1 v wij \u03c0 ij \uf8f6 \uf8f8 + r + y i \u03b7 L i \u03b4 2 Li j=1 \u03c0 ij + \u2212\u03b7 \u2022 1 2L i 2 \u03b4 2 Li j=1 Li m =j \u03c0 ij \u03c0 im + Li j=1 diag{\u03c0 ij } \u03b7 + H(q) + C 1", |
| "eq_num": "(12)" |
| } |
| ], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We update \u03b8 ik using (7). We can obtain the solution by setting the partial derivative w.r.t. \u03c0 k ij to 0 after isolating the terms containing \u03c0 k ij :", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03c0 k ij \u221d exp \u03c8(\u03b8 ik ) + v wij t k + r k + y i \u03b7 k L i \u03b4 2 \u2212 \u03b7 \u03a0 (k) i,\u2212j \u03b7 + (\u03b7 k ) 2 2L i 2 \u03b4 2 ,", |
| "eq_num": "(13)" |
| } |
| ], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "where", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "\u03a0 (k) i,\u2212j := Li m =j \u03a0 im diag{0 (1) , \u2022 \u2022 \u2022 , 1 (k) , \u2022 \u2022 \u2022 , 0 (K) } + diag{0 (1) , \u2022 \u2022 \u2022 , 1 (k) , \u2022 \u2022 \u2022 , 0 (K) } Li m =j \u03a0 im", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "is the partial derivative of", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Estimation of Parameters", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Li m =j \u03c0 ij \u03c0 im w.r.t. \u03c0 k ij . \u03a0 im is a K \u00d7 K matrix whose row is (\u03c0 1 im , \u03c0 2 im , \u2022 \u2022 \u2022 , \u03c0 K im ). The terms containing \u03b7 and \u03b4 2 in the learning objective can be found in (11). So we define a M \u00d7(K +1) matrix A whose i-th row is (Z i , 1) with the (K +1)-th element corresponding to the bias and rewrite (11) of the whole corpus as below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\u03b7 = Concat(\u03b7, \u03b7 bias ), E q [log p(y|A, \u03b7 , \u03b4 2 )] = \u2212 M 2 log(2\u03c0\u03b4 2 ) \u2212 1 2\u03b4 2 E q (y \u2212 A\u03b7 ) (y \u2212 A\u03b7 ) ,", |
| "eq_num": "(14)" |
| } |
| ], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "where \u03b7 is obtained by the function Concat(\u2022) that concatenates the bias term to the end of \u03b7. Taking the derivative w.r.t. \u03b7 and \u03b4 2 and setting them to 0, respectively, we obtain the following:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\\eta'_{new} = E_q\\left[A^{\\top} A\\right]^{-1} E_q[A]^{\\top} y,", |
| "eq_num": "(15)" |
| } |
| ], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "\\delta^{2}_{new} = \\frac{1}{M}\\left(y^{\\top} y - y^{\\top} E_q[A] E_q\\left[A^{\\top} A\\right]^{-1} E_q[A]^{\\top} y\\right)", |
| "eq_num": "(16)" |
| } |
| ], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "where we define \\pi'_{ij} := \\mathrm{Concat}(\\pi_{ij}, 1), and", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "E_q[A]_{i} = \\frac{1}{L_i} \\sum_{j=1}^{L_i} \\pi'_{ij}, \\quad E_q\\left[A^{\\top} A\\right] = \\sum_{i=1}^{M} \\frac{1}{L_i^2} \\left( \\sum_{j=1}^{L_i} \\sum_{m \\neq j}^{L_i} \\pi'^{\\top}_{ij} \\pi'_{im} + \\sum_{j=1}^{L_i} \\mathrm{diag}\\{\\pi'_{ij}\\} \\right).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "Column (K+1) corresponds to the bias term of the regression coefficient. The topic embedding T is updated by the gradient descent method as shown in (8).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Li j=1", |
| "sec_num": null |
| }, |
| { |
| "text": "To evaluate the prediction performance of TopicVec-Reg, we conducted an evaluation experiment in comparison with TopicVec.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As text data, we used Japanese financial articles distributed by Thomson Reuters in 2017. The stock return rate of the company mentioned in the article is tied to the article as the response variable, which is defined as", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "R = \\frac{V_{f'} - V_{f}}{V_{f}}", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "where : V f = final value on the day after the article was published V f = final value on the day before the article is published We used the Tokyo Stock Exchange's historical data for stock prices. When more than one company were mentioned in an article, we sorted the stock return rates of those companies in descending order, then removed those whose absolute value was less than the mean plus one standard deviation, because the stock return rates of such companies may have not been affected by the article. We excluded the articles each of which mentions more than 5 companies, considering that such an article is likely to focus on an industry trend rather than several specific companies. We divided the financial articles distributed from January to June 2017 into two-monthly segments and prepared five preprocessed datasets in time order as shown in Table 2 . The datasets Term 1\u223c4 were used as training sets for closed test, and the dataset Term 5 was used for open test.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 861, |
| "end": 868, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "We performed preprocessing on the text data. After removing intractable tables and unneeded expressions in the articles, we performed morphological analysis using MeCab with mecab-ipadic-NEologd [Sato, 2015] [Sato et al., 2017] , a dictionary of neologisms and unique expressions, to segment words and exclude stop words such as particles and conjunctions. Finally, low-frequency words occurring in less than 5 articles and articles of less than 50 words were removed.", |
| "cite_spans": [ |
| { |
| "start": 195, |
| "end": 207, |
| "text": "[Sato, 2015]", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 208, |
| "end": 227, |
| "text": "[Sato et al., 2017]", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "In our experiments, we compared two models: our proposed model (TopicVec-Reg) and a baseline model (TopicVec+LR), as follows:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "TopicVec-Reg: Estimate the regression parameters and topics simultaneously.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "TopicVec+LR: Perform linear regression after topic estimation with TopicVec, as a baseline.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "TopicVec-Reg was compared to TopicVec+LR in the closed test with 10 different number of topics K \u2208 {5, 10, 15, 20, 25, 30, 35, 40, 45, 50}, and then an open test was performed with the number of topics that brought the smallest average MSE in the closed test.", |
| "cite_spans": [ |
| { |
| "start": 99, |
| "end": 139, |
| "text": "{5, 10, 15, 20, 25, 30, 35, 40, 45, 50},", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "First, we conducted the closed test on the training set from Term 1 to Term 4 to obtain the topic embeddings T and the regression coefficient \u03b7. Specifically, for the test on Term 1, T was randomly initialized following a Gaussian distribution, and the variational parameter \u03c0 was randomly initialized following a uniform distribution at the beginning of learning. Then the topic embeddings T obtained on Term 1 was used as the initial T for Term 2. During training, \u03b7 was updated Term", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Model K=5 K=10 K=15 K=20 K=25 K=30 K=35 K=40 K=45 K=50", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "1 TV-Reg 0.005645 0.005229 0.004957 0.004138 0.004515 0.003904 0.004264 0.003621 0.004113 0.00355 TV+LR 0.005775 0.005722 0.005605 0.00517 0.005179 0.005054 0.005013 0.004893 0.004837 0.004598 2 TV-Reg 0.004411 0.004315 0.004115 0.003965 0.003846 0.003764 0.003708 0.00363 0.003413 0.003524 TV+LR 0.004519 0.004416 0.004277 0.004071 0.004015 0.004082 0.004267 0.004039 0.003824 0.003691 3 TV-Reg 0.004071 0.003916 0.003719 0.003643 0.003578 0.003496 0.003569 0.003159 0.003031 0.003278 TV+LR 0.004104 0.003869 0.003754 0.003771 0.003591 0.003557 0.003721 0.003732 0.003492 0.00311 4 TV-Reg 0.005671 0.005388 0.004864 0.005114 0.004863 0.004551 0.004869 0.004551 0.004762 0.004436 TV+LR 0.005691 0.005329 0.00531 0.005241 0.00514 0.004958 0.005024 0.005139 0.004999 0.004711 average TV-Reg 0.00495 0.004712 0.004414 0.004215 0.004201 0.003929 0.004103 0.00374 0.00383 0.003697 TV+LR 0.005022 0.004834 0.004737 0.004563 0.004481 0.004413 0.004506 0.004451 0.004288 0.004028 Table 3 : MSE in the closed test. every 5 iterations and was used to predict the response variable when the learning process converged.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 972, |
| "end": 979, |
| "text": "Table 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We used the same experimental setup for TopicVec+LR, as for TopicVec-Reg. TopicVec is used to estimate the topics, and then the linear regression was used to estimate the regression coefficients.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The word embeddings V and the residuals A were trained by PSDVec using the Japanese Wikipedia. The hyperparameter was fixed as \u03b1 = (0.1,", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "\u2022 \u2022 \u2022 , 0.1).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "In all experiments, the convergence condition was that the rate of change of \u03c0 must be less than 0.1% for three consecutive times during learning.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We predict the stock return rate as below:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "EQUATION", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [ |
| { |
| "start": 0, |
| "end": 8, |
| "text": "EQUATION", |
| "ref_id": "EQREF", |
| "raw_str": "y_i = \\eta^{\\top} E_q[Z_i] + \\eta_{bias} = \\eta'^{\\top} \\frac{1}{L_i} \\sum_{j=1}^{L_i} \\pi'_{ij}", |
| "eq_num": "(17)" |
| } |
| ], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "We used the Mean Squared Error (MSE) between the true and predicted values of the stock return rate as the measure of prediction performance:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "MSE = \\frac{1}{M} \\sum_{i=1}^{M} (y_i - \\hat{y}_i)^2", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Finally, for the number of topics K with the smallest mean value of MSE in the closed test, the topic embeddings T obtained on Term 4 was used as the initial T to estimate the topics on Term 5 with TopicVec. Since the data for the first month of Term 5 was already used to train the model on Term 4, we predicted the response variables of the data of the second month of Term 5 using \u03b7 obtained with Term 4 and then calculated the MSE between the true values and the predicted ones as the evaluation metric of the open tests. Table 3 shows the MSEs obtained as the result of the closed test on the training sets and their average values. The numbers in blue show that the proposed model has better prediction accuracy than the baseline model. TV-Reg 0.01851 \u00b1 0.09196 TV+LR 0.01917 \u00b1 0.08924 As noted in Table 3 , TopicVec-Reg has better prediction accuracy than TopicVec+LR when K \u2208 {5, 15, 20, 25, 30, 35, 40, 45} . Moreover, Figure 3 shows that TopicVec-Reg has better prediction accuracy than TopicVec+LR on all of K on average.", |
| "cite_spans": [ |
| { |
| "start": 884, |
| "end": 915, |
| "text": "{5, 15, 20, 25, 30, 35, 40, 45}", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 526, |
| "end": 533, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 804, |
| "end": 811, |
| "text": "Table 3", |
| "ref_id": null |
| }, |
| { |
| "start": 928, |
| "end": 936, |
| "text": "Figure 3", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experimental Setup", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "Since the average of MSE was the smallest when K = 50, we performed an open test on Term 5 when K = 50. Table 4 shows the result of MSE and standard deviation in the open test. We further carried out the Wilcoxon signed-rank test for the open-test result and then observed that the p-value was less than 1%, indicating that TopicVec-Reg has better prediction accuracy than TopicVec+LR with a statistically significant difference.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 104, |
| "end": 111, |
| "text": "Table 4", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Results", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "In this paper, we proposed TopicVec-Reg, a topic embedding regression model combining TopicVec and linear regression, with the aim of predicting the stock return rate of the company mentioned in each financial article. The result of the closed test on the training datasets showed that our proposed model has better prediction accuracy on average than TopicVec+LR, and statistically significant improvement in prediction accuracy was also observed in the open test under the best number of topics. More detailed evaluation, such as comparison with other models, is left for future work.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "i=1 Li,L0} is the learning rate, l is the number of iterations in the learning process, L_0 is a predetermined threshold of the number of words, \u03bb_0 is the initial value of \u03bb, and u is the unigram probability of the words occurring in the corpus. 4 Topic Embedding Regression Model. In this section, we propose the topic embedding regression model, TopicVec-Reg, by incorporating a regression function into TopicVec mentioned in the previous section.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Supervised topic models", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcauliffe", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "David", |
| "suffix": "" |
| }, |
| { |
| "first": "Jon", |
| "middle": [ |
| "D" |
| ], |
| "last": "Blei", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Mcauliffe", |
| "suffix": "" |
| } |
| ], |
| "year": 2010, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1003.0783" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "and McAuliffe, 2010] David M Blei and Jon D McAuliffe. Supervised topic models. arXiv preprint arXiv:1003.0783, 2010.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Gaussian lda for topic models with word embeddings", |
| "authors": [], |
| "year": 2003, |
| "venue": "Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th International Joint Conference on Natural Language Processing", |
| "volume": "3", |
| "issue": "", |
| "pages": "439--453", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "et al., 2003] David M Blei, Andrew Y Ng, and Michael I Jordan. Latent dirichlet allocation. the Journal of machine Learning research, 3:993-1022, 2003. [Das et al., 2015] Rajarshi Das, Manzil Zaheer, and Chris Dyer. Gaussian lda for topic models with word embed- dings. In Proceedings of the 53rd Annual Meeting of the Association for Computational Linguistics and the 7th In- ternational Joint Conference on Natural Language Pro- cessing (Volume 1: Long Papers), pages 795-804, 2015. [Dieng et al., 2020] Adji B Dieng, Francisco JR Ruiz, and David M Blei. Topic modeling in embedding spaces. Transactions of the Association for Computational Lin- guistics, 8:439-453, 2020.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A generative word embedding model and its low rank positive semidefinite solution", |
| "authors": [], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1508.03826" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "et al., 2015] Shaohua Li, Jun Zhu, and Chunyan Miao. A generative word embedding model and its low rank positive semidefinite solution. arXiv preprint arXiv:1508.03826, 2015. [Li et al., 2016] Shaohua Li, Tat-Seng Chua, Jun Zhu, and Chunyan Miao. Generative topic embedding: a contin- uous representation of documents (extended version with proofs). arXiv preprint arXiv:1606.02979, 2016. [Sato et al., 2016] Toshinori Sato, Taiichi Hashimoto, and Manabu Okumura. Operation of a word segmentation dic- tionary generation system called neologd (in japanese).", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Implementation of a word segmentation dictionary called mecab-ipadic-neologd and study on how to use it effectively for information retrieval", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Sato", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the Twenty-three Annual Meeting of the Association for Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2017--2023", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "In Information Processing Society of Japan, Special In- terest Group on Natural Language Processing (IPSJ- SIGNL), pages NL-229-15. Information Processing Soci- ety of Japan, 2016. [Sato et al., 2017] Toshinori Sato, Taiichi Hashimoto, and Manabu Okumura. Implementation of a word segmen- tation dictionary called mecab-ipadic-neologd and study on how to use it effectively for information retrieval (in japanese). In Proceedings of the Twenty-three Annual Meeting of the Association for Natural Language Process- ing, pages NLP2017-B6-1. The Association for Natural Language Processing, 2017.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Neologism dictionary based on the language resources on the web for mecab", |
| "authors": [ |
| { |
| "first": "Toshinori", |
| "middle": [], |
| "last": "Sato", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": ", 2015] Toshinori Sato. Neologism dictionary based on the language resources on the web for mecab, 2015.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "type_str": "figure", |
| "text": "A graphical model of TopicVec.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF1": { |
| "type_str": "figure", |
| "text": "A graphical model of TopicVec-Reg.", |
| "num": null, |
| "uris": null |
| }, |
| "FIGREF2": { |
| "type_str": "figure", |
| "text": "Average MSE for the varying number of topics K.", |
| "num": null, |
| "uris": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "text": "", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "text": "Overview of the datasets.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| }, |
| "TABREF3": { |
| "type_str": "table", |
| "text": "MSE and standard deviation in the open test when K=50.", |
| "html": null, |
| "num": null, |
| "content": "<table/>" |
| } |
| } |
| } |
| } |