| { |
| "paper_id": "P19-1038", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T08:22:09.501333Z" |
| }, |
| "title": "What You Say and How You Say It Matters: Predicting Financial Risk Using Verbal and Vocal Cues", |
| "authors": [ |
| { |
| "first": "Yu", |
| "middle": [], |
| "last": "Qin", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Yi", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "imyiyang@ust.hk" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Predicting financial risk is an essential task in financial market. Prior research has shown that textual information in a firm's financial statement can be used to predict its stock's risk level. Nowadays, firm CEOs communicate information not only verbally through press releases and financial reports, but also nonverbally through investor meetings and earnings conference calls. There are anecdotal evidences that CEO's vocal features, such as emotions and voice tones, can reveal the firm's performance. However, how vocal features can be used to predict risk levels, and to what extent, is still unknown. To fill the gap, we obtain earnings call audio recordings and textual transcripts for S&P 500 companies in recent years. We propose a multimodal deep regression model (MDRM) that jointly model CEO's verbal (from text) and vocal (from audio) information in a conference call. Empirical results show that our model that jointly considers verbal and vocal features achieves significant and substantial prediction error reduction. We also discuss several interesting findings and the implications to financial markets. The processed earnings conference calls data (text and audio) are released for readers who are interested in reproducing the results or designing trading strategy.", |
| "pdf_parse": { |
| "paper_id": "P19-1038", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Predicting financial risk is an essential task in financial market. Prior research has shown that textual information in a firm's financial statement can be used to predict its stock's risk level. Nowadays, firm CEOs communicate information not only verbally through press releases and financial reports, but also nonverbally through investor meetings and earnings conference calls. There are anecdotal evidences that CEO's vocal features, such as emotions and voice tones, can reveal the firm's performance. However, how vocal features can be used to predict risk levels, and to what extent, is still unknown. To fill the gap, we obtain earnings call audio recordings and textual transcripts for S&P 500 companies in recent years. We propose a multimodal deep regression model (MDRM) that jointly model CEO's verbal (from text) and vocal (from audio) information in a conference call. Empirical results show that our model that jointly considers verbal and vocal features achieves significant and substantial prediction error reduction. We also discuss several interesting findings and the implications to financial markets. The processed earnings conference calls data (text and audio) are released for readers who are interested in reproducing the results or designing trading strategy.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Predicting financial risks of publicly traded companies is of great interest to capital market participants. In finance, stock price volatility, which is the standard deviation of a stock's returns over a period of time, is often used as a measure of financial risks. Unlike directly predicting stock prices, it is uncontroversial in the field of economics that one can predict a stock's volatility level using publicly available information (Bernard et al., 2007) . Based on this assumption, a burgeoning body of research, both in finance and computational linguistics, has studied predicting stock volatility using various textual sources, including company disclosed reports (Kogan et al., 2009) , public news articles (Tetlock, 2007) , company earnings call transcripts (Wang and Hua, 2014) , and social media (Ding et al., 2015) .", |
| "cite_spans": [ |
| { |
| "start": 442, |
| "end": 464, |
| "text": "(Bernard et al., 2007)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 678, |
| "end": 698, |
| "text": "(Kogan et al., 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 722, |
| "end": 737, |
| "text": "(Tetlock, 2007)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 774, |
| "end": 794, |
| "text": "(Wang and Hua, 2014)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 814, |
| "end": 833, |
| "text": "(Ding et al., 2015)", |
| "ref_id": "BIBREF7" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Thanks to technological advances, massive amounts of unstructured multimedia data, such as investor conference audio records and CEO public speech videos, have been archived and can be accessed by institutional and individual investors. Everything CEOs (or other executives) say will be closely examined and analyzed by investors. There are anecdotal evidences that CEO's nonverbal features, such as emotions and voice tones, can also be used to reveal firm's performance. For example, it has been reported that hedge fund companies hire ex-CIA agents trained in reading nonverbal cues to assess public statements by managers 1 . While prior research in speech communication has reported that the vocal cues have the power to strengthen or weaken the verbal message, and vocal cues can reflect speaker's affective states or emotion, little research has studied the interplay of verbal cues (language) and nonverbal cues (voice) and their impact on the financial markets.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "To fill the gap, we choose a novel multimodal learning setting of company earnings conference call. Earnings conference calls are the periodic conference calls company executives hold with outside investors and analysts to discuss financial results and answer questions raised by analysts. There are three reasons that we choose earnings conference calls as our research setting. First, almost all of the calls are webcast live, and they are later archived on company investor relation (IR) websites or third-party databases. Therefore, both audio and text modalities are available so that we can align vocal cues with verbal cues in multimodal learning, and examine the interplay of both modalities and their impact on the financial markets. Secondly, company earnings announcements are one of biggest stock-moving events. If company reports an earning that does not meet analyst expectation or the CEO fails to address critical questions during the conference call, it often causes significant stock price moves, i.e. high volatility. Lastly, the audio recording and textual transcripts of company earnings conference calls are publicly accessible so interested readers can reproduce the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "In our work, we propose a stock volatility prediction pipeline using company earnings conference call audio and text data. We construct a unique dataset containing conference call audio and text data of S&P 500 companies in recent years. We then align each sentence in the call transcript with the corresponding audio recording clip. For the multimodal learning, we propose a Multimodal Deep Regression Model (MDRM). The MDRM model utilizes BiLSTM layer to extract context-dependent unimodal features, and subsequently fuses unimodal features together using another layer of BiLSTM to extract multimodal inter-dependencies for the regression task. We empirically demonstrates that MDRM models outperform other benchmark methods significantly and substantially. More importantly, the empirical results confirm that audio modality (vocal cues) help to improve volatility prediction accuracy and may reveal the fact that market participants listen to not only what CEOs say but also how CEOs say it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Our contributions can be summarized in two folds. First, we are among the first to study the impact of both verbal and vocal features on financial markets, specifically, stock volatility. Secondly, we empirically show that multimodal learning with audio and text can indeed reduce prediction error, compared to previous work that relies on text only. The interesting finding that vocal cues play a role in stock volatility is worth further exploring. In the next section, we briefly provide institutional background on earnings conference call and its impact on financial markets. In Section 3, we outline related work in financial text regression and multimodal learning. We then present our earnings conference call dataset and how data is processed in Section 4. In section 5, we introduce our multimodal learning framework that fuses ver-bal and vocal features in a deep model. Experiments results are presented in Section 6. Our experiment results show several interesting findings, which we discuss in Section 7. Finally, we conclude this paper in Section 8.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Earnings Announcement Drift (PEAD)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Earnings Conference Call and Post", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Earnings calls are quarterly conference calls company executives hold with outside investors and analysts to discuss firm overall performance. An earnings call consists of two sections: an introduction section and a question-and-answer section. During the introduction section, executives such as CEOs and CFOs read forward-looking statements and provide their information and interpretation of their firms performance during the quarter. During the question-and-answer section, analysts have the opportunity to request managers to clarify information and solicit additional information that the management team does not disclose in the introduction section. The National Investor Relations Institute reports that 92% of companies conduct earnings calls. Institutional and individual investors listen to the earnings call and spot the tones of executives that portend good or bad news for the company.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Earnings Conference Call and Post", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Company earnings conference call can often result in significant stock price moves. For example, Facebook's stock price dropped over 20% during its nightmare earnings call (second quarter 2018) when the executives said the company expected a revenue growth slowdown in the years ahead. In finance and accounting research, Post Earnings Announcement Drift (PEAD) is a well documented phenomenon that a stock's abnormal returns drift in the direction of an earnings surprise for several weeks following an earnings announcement (Ball and Brown, 1968; Bernard and Thomas, 1989) . Moreover, the finance and accounting literature has shown that the stock price moves are largely due to the market reaction to the earnings announcement. The move is most significant during the earnings conference call when the executives start to take analysts questions. In our work, we focus on using executive's verbal and nonverbal cues in conference calls to predict stock price volatility for days following the calls.", |
| "cite_spans": [ |
| { |
| "start": 526, |
| "end": 548, |
| "text": "(Ball and Brown, 1968;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 549, |
| "end": 574, |
| "text": "Bernard and Thomas, 1989)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Earnings Conference Call and Post", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Our work is closely related with the following two lines of research: financial risk prediction with multimedia data: It is a received wisdom in economics and finance that one can predict a stock's risk using historical information (Bernard et al., 2007) . Various work has studied the problem of financial risk prediction using firm financial reports. A pioneer work (Kogan et al., 2009) shows that simple bagof-words features in firm annual report (Form 10-Ks) combined with historical volatility can simply outperform statistical models that is built upon historical volatility only. Other work (Tsai and Wang, 2014; Nopp and Hanbury, 2015; Rekabsaz et al., 2017; Theil et al., 2018; Wang and Hua, 2014) also proposes different document representation methods to predict stock price volatility. To the best of our knowledge, none of existing NLP research on stock volatility prediction considers the usage of vocal features from audio data, especially the interplay between vocal and verbal features. In finance research, only two studies Hobson et al., 2012) have examined the executive voice in earnings calls. However, they extract CEO's affective state from a blackbox third-party audio processing software, the validity of which has been seriously questioned (Lacerda, 2012) .", |
| "cite_spans": [ |
| { |
| "start": 232, |
| "end": 254, |
| "text": "(Bernard et al., 2007)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 368, |
| "end": 388, |
| "text": "(Kogan et al., 2009)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 598, |
| "end": 619, |
| "text": "(Tsai and Wang, 2014;", |
| "ref_id": "BIBREF29" |
| }, |
| { |
| "start": 620, |
| "end": 643, |
| "text": "Nopp and Hanbury, 2015;", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 644, |
| "end": 666, |
| "text": "Rekabsaz et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 667, |
| "end": 686, |
| "text": "Theil et al., 2018;", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 687, |
| "end": 706, |
| "text": "Wang and Hua, 2014)", |
| "ref_id": "BIBREF30" |
| }, |
| { |
| "start": 1042, |
| "end": 1062, |
| "text": "Hobson et al., 2012)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1267, |
| "end": 1282, |
| "text": "(Lacerda, 2012)", |
| "ref_id": "BIBREF15" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "multimodal learning: Despite our financial domain, our approach is relevant to multimodal learning using text and audio. Recent studies on speech communication have shown that a speaker's acoustic features, such as voice pitch, amplitude, and intensity, are highly correlated with the speaker's emotion (Bachorowski, 1999) , deception or trustworthiness (Sporer and Schwandt, 2006; Belin et al., 2017) , anxiety (Laukka et al., 2008) and confidence or doubt (Jiang and Pell, 2017) .", |
| "cite_spans": [ |
| { |
| "start": 303, |
| "end": 322, |
| "text": "(Bachorowski, 1999)", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 354, |
| "end": 381, |
| "text": "(Sporer and Schwandt, 2006;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 382, |
| "end": 401, |
| "text": "Belin et al., 2017)", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 412, |
| "end": 433, |
| "text": "(Laukka et al., 2008)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 458, |
| "end": 480, |
| "text": "(Jiang and Pell, 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Recently, multimodal learning has drawn attentions for different applications, such as sentiment analysis (Zadeh et al., 2016b,a; Poria et al., 2017; Luo et al., 2018) , image caption generation (You et al., 2016) , suicide risk detection (Scherer et al., 2016) , crime drama understanding (Frermann et al., 2018) and human trafficking detection (Tong et al., 2017) . To the best of our knowledge, this work presents the first multimodal deep learning model using text and audio features for a financial markets application.", |
| "cite_spans": [ |
| { |
| "start": 106, |
| "end": 129, |
| "text": "(Zadeh et al., 2016b,a;", |
| "ref_id": null |
| }, |
| { |
| "start": 130, |
| "end": 149, |
| "text": "Poria et al., 2017;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 150, |
| "end": 167, |
| "text": "Luo et al., 2018)", |
| "ref_id": "BIBREF17" |
| }, |
| { |
| "start": 195, |
| "end": 213, |
| "text": "(You et al., 2016)", |
| "ref_id": "BIBREF31" |
| }, |
| { |
| "start": 239, |
| "end": 261, |
| "text": "(Scherer et al., 2016)", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 290, |
| "end": 313, |
| "text": "(Frermann et al., 2018)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 346, |
| "end": 365, |
| "text": "(Tong et al., 2017)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related Work", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In this section, we present dataset details.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Earnings Conference Calls Dataset", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Conference call transcripts have been extensively studied in prior research. However, there is no existing conference call audio dataset. Therefore, we set up our S&P 500 Earnings Conference Calls dataset by acquiring audio records and text transcripts from the following two sources.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Acquisition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Earnings Call Transcripts. The earnings call transcripts are obtained from the website Seeking Alpha 2 . The transcripts are well labeled, including the name of speaker (executives and analysts) and speech content.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Acquisition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "Earnings Call Audio. Given each transcript, we download corresponding audio recording from the website EarningsCast 3 . The downloaded audio data does not provide any segmentation or labeling for speakers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Acquisition", |
| "sec_num": "4.1" |
| }, |
| { |
| "text": "It is too coarse to extract audio features at the conference call transcript level, and it is also too difficult to segment audio recordings at word level. Therefore, we analyze each conference call at sentence level. That is, we want to represent a conference call as a sequence of sentences with corresponding audio clips.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Since conference call normally lasts for about one hour, determining, for each sentence of the transcript, the time interval (in the audio file) containing the spoken text of the sentence is quite challenging. To tackle this challenge, we propose an Iterative Forced Alignment (IFA) algorithm to align each sentence of the transcript with the audio clip containing the spoken text of the sentence. Due to space limit, we present the details of IFA in Appendix. Furthermore, to avoid interference among different speakers, we select only the sentenece made by the most spoken executive (usually the CEO). After the forced alignment step, for each sentence in the conference call transcript, we obtain the sentence text as well as its corresponding audio clip 4 .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Data Processing", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "We use pre-trained word embeddings and calculate the arithmetic mean of word vector in each sentence as the sentence representation. We choose the embedding GloVe-300 (Pennington et al., 2014) pre-trained on Wikipedia and Gigaword 5 5 . Therefore, each sentence is represented as a 300-dimension vector.", |
| "cite_spans": [ |
| { |
| "start": 167, |
| "end": 192, |
| "text": "(Pennington et al., 2014)", |
| "ref_id": "BIBREF21" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Textual Features", |
| "sec_num": null |
| }, |
| { |
| "text": "Audio Features We use Praat (Boersma and Van Heuven, 2001) to extract vocal features, such as pitch, intensity, jitter, HNR(Harmonic to Noise Ratio) and etc, from audio recordings. A total of 27 vocal features are extracted by Praat.", |
| "cite_spans": [ |
| { |
| "start": 28, |
| "end": 58, |
| "text": "(Boersma and Van Heuven, 2001)", |
| "ref_id": "BIBREF6" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Textual Features", |
| "sec_num": null |
| }, |
| { |
| "text": "In summary, for each sentence in an earnings conference call, we generate a 300-dimension text vector and a 27-dimension audio vector to represent verbal and vocal features separately.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Textual Features", |
| "sec_num": null |
| }, |
| { |
| "text": "Data Statistics We build our dataset by acquiring all S&P 500 companies' quarterly earnings conference calls in 2017. We choose S&P 500 constituent firms as the target for volatility prediction for reasons of importance and tractability. Firms in the S&P 500 index encompass roughly three-quarters of the total U.S. market capitalization. A total of 2,243 earnings conference calls are downloaded from Seeking Alpha and EarningsCast. We discard conference calls which text-audio alignment is not done properly, using the abovementioned data processing method. The final dataset consists of 576 conference calls, with a total number of 88,829 sentences. It can be seen that we discard a large proportion of raw data because the audio-text alignment is very noisy and is prone to errors. We release our processed earnings conference calls dataset 6 (text and audio) for readers who are interested in reproducing the results.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Textual Features", |
| "sec_num": null |
| }, |
| { |
| "text": "We formalize the problem as a supervised machine learning task. The input data is a company's earnings conference call verbal (textual) features and corresponding vocal (audio) features; This is mapped to a numerical variable which is the company's stock price volatility following the conference call.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Prior research (Kogan et al., 2009; Rekabsaz et al., 2017) uses only shallow machine learning model (such as logistic regression) and bag-of-word features to represent financial documents. In other words, the relation and dependencies among the sentences are largely ignored. However, every sentence in a conference call is spoken at a distinct time and in a particular order. Therefore, it is better to treat a conference call as a sequence of sentences. To this end, like other sequence classification problems, we choose to use a recurrent neural network to capture the sentences relation and dependency.", |
| "cite_spans": [ |
| { |
| "start": 15, |
| "end": 35, |
| "text": "(Kogan et al., 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 36, |
| "end": 58, |
| "text": "Rekabsaz et al., 2017)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5" |
| }, |
| { |
| "text": "When multimodal verbal and vocal features are available, it is also important to capture the dependency between different modalities, as the vocal cues either affirm or discredit the verbal message. For example, if a CEO says \"we are confident about the future product sales\" with a voice that is different from the CEO's base vocal cues, such as increased pitch or pauses, we may infer that the CEO is not as confident as he claims. In fact, existing research (Jiang and Pell, 2017) in speech communication has shown that voice (vocal cues) plays a critical role in verbal communication. If we ignore the voice patterns that are accompanied with the verbal language, we may misinterpret the CEO's statement. Especially in financial markets where CEO's word and voice are closely examined by professional analysts and investors, it is plausible that market reacts to both verbal and vocal signals.", |
| "cite_spans": [ |
| { |
| "start": 461, |
| "end": 483, |
| "text": "(Jiang and Pell, 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5" |
| }, |
| { |
| "text": "Therefore, we present a deep model to capture context-dependent unimodal features and fuse multimodal features for the regression task. The high-level idea behind the design is to use contextual BiLSTM to extract context-dependent unimodal features separately corresponding to each sentence, and then use a BiLSTM to fuse multimodalities and extract the inter-dependencies between different modalities. The details of our model is described below.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Model", |
| "sec_num": "5" |
| }, |
| { |
| "text": "We first introduce our notations. Let M be the total number of conference call transcripts while the longest one has N sentences. Then we denote X j as the j th conference call, where 1 \u2264 j \u2264 M . In our multimodal setting, X j = [T j ; A j ]. T j is a N \u00d7 dt matrix that represents the document embeddings of the call transcripts, where N is the number of sentences in a document 7 and dt is the dimensions of word embedding. A j is a N \u00d7 da matrix that represents the vocal features extracted from earnings call audios, where da is the dimensions audio feature. y j and\u0177 j represent the true and predicted stock volatility value corresponding to j th conference call.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Notations", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "Our multimodal deep regression model (MDRM) includes two components. The first component is a contextual BiLSTM that extracts unimodal features for either text or audio modality. The contextual BiLSTM is able to capture the relationship and dependency for unimodal inputs. In the second component, the extracted multimodal (text and audio) features are then combined and are fed into a BiLSTM with a fully-connected layer, which extracts inter-dependencies between text and audio modality.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Multimodal Deep Regression Model", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The Contextual LSTM is proposed by (Poria et al., 2017) , designed to analyze video emotion utilizing text, speech and video image. The contextual LSTM connects dense layers and softmax output with each LSTM unit. In the implementation, this architecture is also called time-distributed dense layer. This structure helps maintain the latent time sequence in data while making sentiment classification on the utterance level. In our contextual LSTM, we choose the BiL-STM as fundamental LSTM architecture by its best performence in past work (Poria et al., 2017) . BiLSTM is the bidirectional LSTM (Hochreiter and Schmidhuber, 1997) , which is an extended model of recurrent neural network (RNN). Specifically, LSTM is designed to acquire key information from time series data while overcoming the defect that traditional RNN might lose information in long time series. BiLSTM is then developed from LSTM, considering not only the forward information transfer but backward transfer. The bidirectional information transmission significantly improves model prediction power. For the construction of Contextual BiLSTM, detailed formulas (Only forward transmission formulas) are described below.", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 55, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 541, |
| "end": 561, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 597, |
| "end": 631, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF12" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extracting Unimodal Features with Contextual BiLSTM", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "f j = \u03c3 g (W f x j + U f h j\u22121 + b f ) i j = \u03c3 g (W i x j + U i h j\u22121 + b i ) o j = \u03c3 g (W o x j + U o h j\u22121 + b o ) c j = f j \u2022 c j\u22121 + i j \u2022 \u03c3 c (W c x j + U c h j\u22121 + b c ) h j = o j \u2022 \u03c3 h (c j ) Z j = ReLU (W z h j + b z )", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extracting Unimodal Features with Contextual BiLSTM", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "In the above formulas, x j denotes the j th input features, i.e., the j th sentence textual or audio features. f j , i j , and o j represent the standard forget gate, input gate and output gate. W and b are trainable vectors in the training process, and all the vectors described above are used to generate hidden state h j and cell state c j . Z j in the last formula stands for the output of time-distributed dense layer connected to the j th LSTM unit.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extracting Unimodal Features with Contextual BiLSTM", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "Compared with Poria's work (Poria et al., 2017) , we remove the softmax output on LSTM unit since our regression is applied on document level, instead of utterance level. The dense layer output is constructed as a new time sequence feature to be further utilized in next stage.", |
| "cite_spans": [ |
| { |
| "start": 27, |
| "end": 47, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Extracting Unimodal Features with Contextual BiLSTM", |
| "sec_num": "5.2.1" |
| }, |
| { |
| "text": "Hierarchical fusion of unimodal features is achieved by our Multimodal Deep Regression Model. Figure 1 demonstrates the integral process. In this process, the hierarchical fusion consists of two stages. Stage 1 Vectors T and A are represented by the matrices on the left. Matrix T is 520 \u00d7 300 dimensional and matrix A is 520 \u00d7 27 dimensional, while 520 is the length of document, 300 and 27 are the dimensions of textual features and audio features. The matrices are then fed into Contextual BiLSTM through a Mask layer to screen the effect of zero-padding. As described in 5.2.1, Contextual BiLSTM extracts unimodal features for each matrix separately while keep the original chronological order. After extracted, unimodal features are still organized on sentence level so they can be horizontally stitched as merged features in the middle of Figure 1 .", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 94, |
| "end": 102, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| }, |
| { |
| "start": 845, |
| "end": 853, |
| "text": "Figure 1", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Hierarchical Fusion of Unimodal Features", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "Stage 2 The merged features are then fed into a BiLSTM connected with a two-layer neural network. To be specifically, we avoid the same network architecture as Poria's work (Poria et al., 2017) here to achieve our unique purpose. Unlike video emotion classification, the regression problem in our study is document-level, which means that we do not make prediction on each utterance. Therefore, Contextual BiLSTM is not suitable for stage 2 since the features are already extracted on high-level. In stage 2, we use the BiLSTM connected with a two-layer neural network to complete the regression. The effectiveness of this concision structure will be experimental proved in the experiment result section.", |
| "cite_spans": [ |
| { |
| "start": 173, |
| "end": 193, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Hierarchical Fusion of Unimodal Features", |
| "sec_num": "5.2.2" |
| }, |
| { |
| "text": "The stock volatility prediction problem is formulated following (Kogan et al., 2009) . The volatility is defined as:", |
| "cite_spans": [ |
| { |
| "start": 64, |
| "end": 84, |
| "text": "(Kogan et al., 2009)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "v [t\u2212\u03c4,t] = ln \u03c4 i=0 (r t\u2212i \u2212r) 2 \u03c4 (1)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "where r t is the return price at day t andr is the mean of the return price over the period of day t \u2212 \u03c4 to day t. The return price is defined as r t = Pt P t\u22121 \u2212 1, where P t is the closing price on day t. We choose different \u03c4 values, including 3, 7, 15, 30 calendar days to evaluate the shortterm and long-term effectiveness of volatility prediction. We obtain daily stock prices of year 2017 (dividend-adjusted) from CRSP database.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We report the performance using the Mean Squared Error (MSE) between the predicted volatility and true volatility:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "M SE = 1 M M i=1 (f (X i ) \u2212 y i ) 2 (2)", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "where M is the size of the test set, and y i is the true volatility associated with testing example X i .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Setup", |
| "sec_num": "6" |
| }, |
| { |
| "text": "We consider several stock volatility prediction baselines as described below. Past Volatility. It is often reported in prior research that past volatility is a strong predictor of future volatility. Thus we consider using the volatility of previous \u03c4 -days before conference call to predict the \u03c4 -days volatility following the conference call. We call this baseline v past .", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "tf-idf bag-of-words. It is used in (Kogan et al., 2009) . The feature value is classic tf-idf score. Term frequency (tf) is calculated as T F = n i,j k n k,j , and inverse document frequency (idf) is calculated as IDF = log ( |d| 1+df (t) ), where the n i,j is the number of frequency of term t i in document d j , and k n k,j denotes the sum of all terms appear in document d j . |d| is the total number of document, and df (t) is the sum of documents which contain the term t i .", |
| "cite_spans": [ |
| { |
| "start": 35, |
| "end": 55, |
| "text": "(Kogan et al., 2009)", |
| "ref_id": "BIBREF14" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "word embeddings. Each transcript is represented as a weighted average of word embeddings. In our experiment, we use pre-trained GloVe-300 word embeddings. This document representation is shown to be a simple yet effective method (Arora et al., 2017) . This baseline can help us to evaluate the effectiveness of proposed deep model. We also experiment with pre-trained word embeddings GloVe-50 and GloVe-100 but find GloVe-300 performs the best among those. Therefore, we use GloVe-300 as input word embeddings throughout our experiments.", |
| "cite_spans": [ |
| { |
| "start": 229, |
| "end": 249, |
| "text": "(Arora et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "For the above two baselines tf-idf bag-of-words and word embeddings, given conference call transcript representations, we apply Support Vector Regression (SVR) (Drucker et al., 1997) with Radial Basis Function (RBF) kernel to predict stock volatility y i , following previous studies (Kogan et al., 2009; Rekabsaz et al., 2017; Tsai and Wang, 2014) .", |
| "cite_spans": [ |
| { |
| "start": 160, |
| "end": 182, |
| "text": "(Drucker et al., 1997)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 284, |
| "end": 304, |
| "text": "(Kogan et al., 2009;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 305, |
| "end": 327, |
| "text": "Rekabsaz et al., 2017;", |
| "ref_id": "BIBREF23" |
| }, |
| { |
| "start": 328, |
| "end": 348, |
| "text": "Tsai and Wang, 2014)", |
| "ref_id": "BIBREF29" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "We also consider two multimodal learning baselines that fuse both audio and textual features.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "simple fusion This is a baseline using a simple shallow model to fuse different modalities. The audio and text features are fed into SVR as input. Using this baseline, we can compare the effectiveness of deep multimodal model with shallow multimodal model. bc-LSTM It is a state-of-the-art multimodal learning model as proposed in (Poria et al., 2017) . They present a bidirectional contextual LSTM (bc-LSTM) framework for fusing multimodal features including audio, video and text. We replicate their deep model as a direct baseline.", |
| "cite_spans": [ |
| { |
| "start": 331, |
| "end": 351, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "For our multimodal deep regression model (MDRM), we also evaluate three different scenarios: text-only, audio-only, and both text and audio are available text+audio.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Baselines", |
| "sec_num": "6.1" |
| }, |
| { |
| "text": "Our deep model is built and trained with Keras 8 . We apply backpropagation with stochastic gradient descent in the training, and we choose the mean square error as the loss function. We use linear activation for the final regression layer and implement ReLU activation function for the remaining layers.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Setup", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "During the experiment, we find that training with audio data is more prone to overfitting. We then implement dropout in our model. In the first stage, we set dropout as 0.5 for audio contextual BiLSTM and 0.8 for text contextual BiLSTM. In the second stage, we remove the dropout layer. For the model evaluation, randomly splitting dataset into training/validation/testing is not reasonable since we should not use later years' conference calls to predict previous years' stock volatilities. Therefore, we choose the top 80% of the data as training data and the remaining 20% as test data.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Training Setup", |
| "sec_num": "6.2" |
| }, |
| { |
| "text": "Predicting stock volatility is a rather challenging task given the noisiness of the stock markets. Following prior research, we report volatility number in the 3-th decimal. The main experiment results are shown in Table 1 . We now discuss the experiment results and several interesting findings as well as their implications to the stock markets.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 215, |
| "end": 222, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Multimodal Deep Regression Model is Effective. The results show that our multimodal deep regression model (MDRM) outperforms all baselines. Using both text and audio data, the model has prediction error of 1.371, 0.420, 0.300 and 0.217 for 3-days, 7-days, 15-days and 30-days following the conference call respectively. Comparing with using past volatility only, the improvement gain is as substantial as 54.1% for 3-days prediction. The improvement over other baseline methods are 19.1% (tf-idf bag-of-words), 17.8% (word embeddings), 20.4%(simple fusion) respectively for 3-days prediction. Comparing with the state-of-art baseline bc-LSTM (Poria et al., 2017) , MDRM also achieve 3.3% error reduction for 3days prediction. It is worth emphasizing the substantial improvement over simple fusion model. As our design motivation, verbal and vocal features should be modeled jointly as vocal cues either affirm or discredit the verbal message in public communication. Our deep regression model is able to capture the interplay of both modalities that a simple feature fusion model cannot.", |
| "cite_spans": [ |
| { |
| "start": 642, |
| "end": 662, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Both modalities are helpful. We can also conclude from the results that multimodal features are more helpful than unimodal features (either text or audio) alone. When we predict the stock volatility 3-days following the conference call, multimodal (1.371) outperform unimodal (1.431) by 4.2%. As shown in Table 1 , MDRM (text+audio) significantly outperforms MDRM (text only) and MDRM (audio-only) model for 3-days, 7-days and 15 days stock volatility prediction. The im- provement is not statistically significant for 30days prediction, which we will explain the possible reasons later. In addition to reduced prediction error, fusing both modalities can mitigate potential overfitting problem. We find that training a deep LSTM network with audio data only can result in overfitting very quickly. In our experiment, the audio-only deep network shows a trend of over-fitting in 10 epochs. Therefore, the result that audio-only MDRM performs better than textonly MDRM (1.412 vs. 1.431) may need careful interpretation as we have to stop audio-only model training early to prevent overfitting. However, using both audio features and text features, the model usually converges in 20 epochs without over-fitting.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 305, |
| "end": 312, |
| "text": "Table 1", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "\u03c4 =3 \u03c4 =7 \u03c4 =15 \u03c4 =30 v", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Some Individual Vocal Features are Important. We also design another experiment to investigate the importance of different vocal features. We examine whether the left-out of individual vocal features can affect prediction results. We follow the prior research (Jiang and Pell, 2017) to select five representative vocal features including mean pitch, standard deviation of pitch, mean intensity, number of pulses and mean HNR (Harmonic-to-Noise Ratio). Our experiment results show that without mean pitch feature, the MSE of our model increases 0.7%. The left-out of standard deviation of pitch also raises MSE by 0.65%. For mean intensity and number of pulses, MSE increases by 0.63% and 0.56% respectively. However, MSE is not changed with mean HNR being left-out.This finding is consistent with prior research in speech communication that pitch and intensity are important features when detecting a speaker's confident and doubt.", |
| "cite_spans": [ |
| { |
| "start": 260, |
| "end": 282, |
| "text": "(Jiang and Pell, 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Short-term Volatility Prediction is Hard. Our prediction results consistently show that short term volatility prediction error is much greater than long term prediction error. For example, the 3days prediction MSE of MDRM is 1.371, while the 30-days MSE is 0.217. The gain of MDRM over past volatility baseline v past diminishes from 54% (\u03c4 = 3) to 6% (\u03c4 = 30). In other words, short term volatility prediction is much more difficult than long term prediction. This phenomenon has also been extensively documented in finance and accounting literature, known as post earnings announcement drift (PEAD). Research (Ball and Brown, 1968; Bernard and Thomas, 1989) have shown that the stock price moves more significantly (volatile) in a short period of time (several trading days) following the conference call than in a long period of time (from weeks to months). Even though the absolute value of MSE is higher in short-term, the 54% improvement over baseline past volatility is still encouraging, because any information that helps to formulate realistic estimates of the volatility can be invaluable to capital market participants.", |
| "cite_spans": [ |
| { |
| "start": 611, |
| "end": 633, |
| "text": "(Ball and Brown, 1968;", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 634, |
| "end": 659, |
| "text": "Bernard and Thomas, 1989)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Marginal Gain over Simple Models is Diminishing in Long-term. Our experiment results also consistently show that complex deep models such as bc-LSTM (Poria et al., 2017) or our proposed deep regression model outperform shallow models (such as SVR) by large margin in short-term prediction (\u03c4 =3 or 7). However, the margin becomes smaller as we predict a relative long-term stock volatility (\u03c4 =15 or 30). For example, comparing with tf-idf bag-of-words model at \u03c4 = 3, our MDRM reduces prediction error by 19.1% (1.371 vs. 1.695). However, at \u03c4 = 30, the prediction error reduction is 12.8% (0.217 vs. 0.249). This can also be confirmed that when \u03c4 = 30, the MSE of past volatility method is as small as 0.231, which is even better than tf-idf bagof-words model and is only slightly worse than MDRM. In other words, the benefit of using complex deep model for long-term volatility prediction is smaller than for short-term volatility prediction. This phenomenon can be explained by Efficientmarket hypothesis (EMH), which is a theory in financial economics that states that the stock prices only react to new information so it is impossible to predict the stock price based on historical information. Therefore, as we target for a longer time horizon, the predictive power of using the previous conference calls information becomes less significant and substantial.", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 169, |
| "text": "(Poria et al., 2017)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiment Results and Discussion", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Quarter 2017", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Case Study: AMD Conference Call First", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "We conduct a case study to further investigate the validity of multimodal learning for stock volatility prediction. The case study is based on the AMD (Advanced Micro Devices Inc.)'s earnings conference call in the first quarter of 2017. We qualitatively explain why multimodal features are more helpful than unimodal text features. May 1st 2017 is a bad day for AMD investors. After the company's earnings conference call, the stock price dropped by 16.1% in the post market session. The company's stock price became very volatile for the next few days. We analyze the conference call transcript with corresponding audio recording of the company's Chief Executive Officer (CEO) Dr. Lisa T. Su. Figure 2 illustrates the inconsistencies between the CEO's verbal cues and her vocal cues. We observe that there is a significant increase in mean pitch while the CEO is saying \"Overall, from a performance standpoint, the product and the customer engagements are going as we would expect\" (Case 1). While the language is positive, the mean pitch of CEO's voice increases 20% above her average mean pitch (203.39 Hz) and the mean pitch values in nearby sentences. According to prior acoustic research (Jiang and Pell, 2017) , the high mean pitch may correlate with a speaker being not confident about what he or she is talking about. A similar inconsistency also happens when the CEO is saying We have more memory bandwidth\" (Case 2).", |
| "cite_spans": [ |
| { |
| "start": 1195, |
| "end": 1217, |
| "text": "(Jiang and Pell, 2017)", |
| "ref_id": "BIBREF13" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 695, |
| "end": 703, |
| "text": "Figure 2", |
| "ref_id": "FIGREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Case Study: AMD Conference Call First", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "After the earnings conference call, it turns out that the revenue of AMD actually missed the an- alyst expectation by $0.38M. Thus, the positive words in the CEO's language is not as credible as it sounds. Using unimodal text data only, we may miss the inconsistency in verbal and vocal cues. Therefore, the multimodal learning model may capture the inter-dependency between multimodal features and better predict market reactions to earnings conference calls.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Case Study: AMD Conference Call First", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "Predicting financial risks of publicly traded companies is an essential task in financial markets. In this work, we have demonstrated that CEO's language and voice in company earnings conference calls can be utilized to predict the company financial risk level, as measured by stock price volatility for days following the conference call. We propose a BiLSTM-based multimodal deep regression model that extracts and fuses multimodal features from text transcripts and audio recordings. Even though our work is an application of financial domain, we hope our multimodal learning model can also be useful in other areas (such as social media and customer service) where multimodality data is available. Iterative 63 60 37 40 Total:123 Total:77 One-Time 33 22 67 78 Total:55 Total:145 To acquire right-segmented earnings conference calls automatically. We implement both IFA and One-Time segmentation on the remaining data, selecting the right-segmented earnings conference call by comparing the result of two methods. If the difference of segmentation result between the two methods is small in one document, we note this document as right-segmented.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 702, |
| "end": 785, |
| "text": "Iterative 63 60 37 40 Total:123 Total:77 One-Time 33 22 67 78 Total:55", |
| "ref_id": "TABREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "8" |
| }, |
| { |
| "text": "By adopting IFA on our dataset, we solve the long, noisy audio segmentation problem in an effective way. Since there is no recognized practical method to deal with such a problem, our work can contribute to those researchers who are interested in long audio processing and analyzing. Not only in financial materials analysis field but also in other areas including social media analysis and emotion recognition.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Match Not Match Begin End Begin End", |
| "sec_num": null |
| }, |
| { |
| "text": "MarketWatch website. From CIA to BIA: Spotting execs who bend the truth. Accessed: 2019-06-02", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://seekingalpha.com/ 3 https://earningscast.com/ 4 It is worth noting that some third-party data provider companies provide human-annotated transcript text and audio recording alignment. In that case, text-audio forced alignment step may not be necessary.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "https://nlp.stanford.edu/projects/glove/ 6 Our dataset is available at https://github.com/ GeminiLn/EarningsCall_Dataset", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Assuming the longest document has N sentences, for documents which contain less than N sentences, we utilize zero-padding to fill them to N to keep consistency.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Keras: https://keras.io/", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Aeneas: https://github.com/readbeyond/aeneas", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "This work was supported by Theme-based Research Scheme (No. T31-604/18-N) from Research Grants Council in Hong Kong, and the National Natural Science Foundation of China (Grant No. 71771212, U1711262). We thank the anonymous reviewers for helpful comments. Any opinions, findings, conclusions, or recommendations expressed here are those of the authors and do not necessarily reflect the view of the sponsor.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgments", |
| "sec_num": null |
| }, |
| { |
| "text": "In this appendix section, we present details of our text and audio forced alignment method. Given an audio file containing speech, and the corresponding transcript, forced alignment is defined as the process of determining, for each fragment of the transcript, the time interval (in the audio file) containing the spoken text. In our setting, we need to match speaker's speech and corresponding spoken text from an earnings conference call data.However, earnings conference call normally lasts for about one hour or longer. Therefore, aligning audio clips with the corresponding text is quite challenging.Toward this end, we propose an Iterative Forced Alignment (IFA) algorithm to promote the alignment results on our data set. The IFA method is inspired by a spoken language processing work (Moreno et al., 1998) . We implement IFA on the basis of normal forced alignment technology, inif Length(a i ) = 0 then 3:Save slice a,t as files M is the number of calls 17:while result! = T rue do 20:end for 23: end function Python, we use Aeneas 9 as fundamental forced alignment method. Algorithm 1 demonstrates the specific architecture of our method.During our experiment, we find the forced alignment performs well in the beginning and end of the whole document. In the middle parts, alignment result might be influenced by short syllable words, fast switching of speakers or omission of text record. Therefore, we utilize the iterative strategy in segmentation. Instead of aligning the whole document and then segment it according to alignment result, the IFA chooses to segment only the last paragraph at one time, since the last paragraph is most likely to be aligned precisely. After segment the last paragraph, IFA will restart the forced alignment on the remaining audio and text, generate the new alignment result and segment the last paragraph, until document is fully processed. We randomly select 200 earnings conference calls to test the effectiveness of IFA. 
As shown in Table 2, the adoption of IFA improves segmentation accuracy and reduces the degree of error significantly.", |
| "cite_spans": [ |
| { |
| "start": 793, |
| "end": 814, |
| "text": "(Moreno et al., 1998)", |
| "ref_id": "BIBREF19" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "A Appendices", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A simple but tough-to-beat baseline for sentence embeddings", |
| "authors": [ |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengyu", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ICLR", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanjeev Arora, Yingyu Liang, and Tengyu Ma. 2017. A simple but tough-to-beat baseline for sentence em- beddings. In In Proceedings of ICLR.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Vocal expression and perception of emotion. Current directions in psychological science", |
| "authors": [ |
| { |
| "first": "Jo-Anne", |
| "middle": [], |
| "last": "Bachorowski", |
| "suffix": "" |
| } |
| ], |
| "year": 1999, |
| "venue": "", |
| "volume": "8", |
| "issue": "", |
| "pages": "53--57", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jo-Anne Bachorowski. 1999. Vocal expression and perception of emotion. Current directions in psy- chological science, 8(2):53-57.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "An empirical evaluation of accounting income numbers", |
| "authors": [ |
| { |
| "first": "Ray", |
| "middle": [], |
| "last": "Ball", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Brown", |
| "suffix": "" |
| } |
| ], |
| "year": 1968, |
| "venue": "Journal of accounting research", |
| "volume": "", |
| "issue": "", |
| "pages": "159--178", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ray Ball and Philip Brown. 1968. An empirical eval- uation of accounting income numbers. Journal of accounting research, pages 159-178.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "The sound of trustworthiness: Acoustic-based modulation of perceived voice personality", |
| "authors": [ |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Belin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bibi", |
| "middle": [], |
| "last": "Boehme", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Mcaleer", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "PloS one", |
| "volume": "12", |
| "issue": "10", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pascal Belin, Bibi Boehme, and Phil McAleer. 2017. The sound of trustworthiness: Acoustic-based mod- ulation of perceived voice personality. PloS one, 12(10):e0185651.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Equilibrium portfolio strategies in the presence of sentiment risk and excess volatility", |
| "authors": [ |
| { |
| "first": "Dumas", |
| "middle": [], |
| "last": "Bernard", |
| "suffix": "" |
| }, |
| { |
| "first": "Kurshev", |
| "middle": [], |
| "last": "Alexander", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Dumas Bernard, Kurshev Alexander, and Uppal Ra- man. 2007. Equilibrium portfolio strategies in the presence of sentiment risk and excess volatility. Working Paper 13401, National Bureau of Eco- nomic Research.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Postearnings-announcement drift: delayed price response or risk premium", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Victor", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob K", |
| "middle": [], |
| "last": "Bernard", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Thomas", |
| "suffix": "" |
| } |
| ], |
| "year": 1989, |
| "venue": "Journal of Accounting research", |
| "volume": "27", |
| "issue": "", |
| "pages": "1--36", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Victor L Bernard and Jacob K Thomas. 1989. Post- earnings-announcement drift: delayed price re- sponse or risk premium? Journal of Accounting research, 27:1-36.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Speak and unspeak with praat", |
| "authors": [ |
| { |
| "first": "Paul", |
| "middle": [], |
| "last": "Boersma", |
| "suffix": "" |
| }, |
| { |
| "first": "Vincent", |
| "middle": [], |
| "last": "Van Heuven", |
| "suffix": "" |
| } |
| ], |
| "year": 2001, |
| "venue": "Glot International", |
| "volume": "5", |
| "issue": "9", |
| "pages": "341--347", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul Boersma and Vincent Van Heuven. 2001. Speak and unspeak with praat. Glot International, 5(9/10):341-347.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Deep learning for event-driven stock prediction", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Yue", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Ting", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Junwen", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of IJCAI", |
| "volume": "", |
| "issue": "", |
| "pages": "2327--2333", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Ding, Yue Zhang, Ting Liu, and Junwen Duan. 2015. Deep learning for event-driven stock predic- tion. In In Proceedings of IJCAI, pages 2327-2333.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Support vector regression machines", |
| "authors": [ |
| { |
| "first": "Harris", |
| "middle": [], |
| "last": "Drucker", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [ |
| "C" |
| ], |
| "last": "Christopher", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [], |
| "last": "Burges", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [ |
| "J" |
| ], |
| "last": "Kaufman", |
| "suffix": "" |
| }, |
| { |
| "first": "Vladimir", |
| "middle": [], |
| "last": "Smola", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Vapnik", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Harris Drucker, Christopher J. C. Burges, Linda Kauf- man, Alex J. Smola, and Vladimir Vapnik. 1997. Support vector regression machines. In M. C.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Proceedings of NIPS", |
| "authors": [ |
| { |
| "first": "M", |
| "middle": [ |
| "I" |
| ], |
| "last": "Mozer", |
| "suffix": "" |
| }, |
| { |
| "first": "T", |
| "middle": [], |
| "last": "Jordan", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Petsche", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "155--161", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mozer, M. I. Jordan, and T. Petsche, editors, In Pro- ceedings of NIPS, pages 155-161.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Whodunnit? crime drama as a case for natural language understanding", |
| "authors": [ |
| { |
| "first": "Lea", |
| "middle": [], |
| "last": "Frermann", |
| "suffix": "" |
| }, |
| { |
| "first": "B", |
| "middle": [], |
| "last": "Shay", |
| "suffix": "" |
| }, |
| { |
| "first": "Mirella", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lapata", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Transactions of the Association of Computational Linguistics", |
| "volume": "6", |
| "issue": "", |
| "pages": "1--15", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lea Frermann, Shay B Cohen, and Mirella Lapata. 2018. Whodunnit? crime drama as a case for natural language understanding. Transactions of the Associ- ation of Computational Linguistics, 6:1-15.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Analyzing speech to detect financial misreporting", |
| "authors": [ |
| { |
| "first": "L", |
| "middle": [], |
| "last": "Jessen", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hobson", |
| "suffix": "" |
| }, |
| { |
| "first": "J", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohan", |
| "middle": [], |
| "last": "Mayew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Venkatachalam", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "Journal of Accounting Research", |
| "volume": "50", |
| "issue": "2", |
| "pages": "349--392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jessen L Hobson, William J Mayew, and Mohan Venkatachalam. 2012. Analyzing speech to detect financial misreporting. Journal of Accounting Re- search, 50(2):349-392.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "The sound of confidence and doubt", |
| "authors": [ |
| { |
| "first": "Xiaoming", |
| "middle": [], |
| "last": "Jiang", |
| "suffix": "" |
| }, |
| { |
| "first": "D", |
| "middle": [], |
| "last": "Marc", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pell", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Speech Communication", |
| "volume": "88", |
| "issue": "", |
| "pages": "106--126", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiaoming Jiang and Marc D Pell. 2017. The sound of confidence and doubt. Speech Communication, 88:106-126.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Predicting risk from financial reports with regression", |
| "authors": [ |
| { |
| "first": "Shimon", |
| "middle": [], |
| "last": "Kogan", |
| "suffix": "" |
| }, |
| { |
| "first": "Dimitry", |
| "middle": [], |
| "last": "Levin", |
| "suffix": "" |
| }, |
| { |
| "first": "Bryan", |
| "middle": [ |
| "R" |
| ], |
| "last": "Routledge", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [ |
| "S" |
| ], |
| "last": "Sagi", |
| "suffix": "" |
| }, |
| { |
| "first": "Noah", |
| "middle": [ |
| "A" |
| ], |
| "last": "Smith", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proceedings of NAACL", |
| "volume": "", |
| "issue": "", |
| "pages": "272--280", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shimon Kogan, Dimitry Levin, Bryan R. Routledge, Jacob S. Sagi, and Noah A. Smith. 2009. Predict- ing risk from financial reports with regression. In In Proceedings of NAACL, pages 272-280.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Money talks: The power of voice: A critical review of mayew and ventachalams the power of voice: Managerial affective states and future firm performance", |
| "authors": [ |
| { |
| "first": "Francisco", |
| "middle": [], |
| "last": "Lacerda", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "PERILUS", |
| "volume": "", |
| "issue": "", |
| "pages": "1--10", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Francisco Lacerda. 2012. Money talks: The power of voice: A critical review of mayew and ventachalams the power of voice: Managerial affective states and future firm performance. PERILUS, pages 1-10.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "In a nervous voice: Acoustic analysis and perception of anxiety in social phobics speech", |
| "authors": [ |
| { |
| "first": "Petri", |
| "middle": [], |
| "last": "Laukka", |
| "suffix": "" |
| }, |
| { |
| "first": "Clas", |
| "middle": [], |
| "last": "Linnman", |
| "suffix": "" |
| }, |
| { |
| "first": "Anna", |
| "middle": [], |
| "last": "Fredrik\u00e5hs", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00d6rjan", |
| "middle": [], |
| "last": "Pissiota", |
| "suffix": "" |
| }, |
| { |
| "first": "Vanda", |
| "middle": [], |
| "last": "Frans", |
| "suffix": "" |
| }, |
| { |
| "first": "\u00c5sa", |
| "middle": [], |
| "last": "Faria", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Michelg\u00e5rd", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "Journal of Nonverbal Behavior", |
| "volume": "32", |
| "issue": "4", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Petri Laukka, Clas Linnman, Fredrik \u00c5hs, Anna Pis- siota, \u00d6rjan Frans, Vanda Faria, \u00c5sa Michelg\u00e5rd, Lieuwe Appel, Mats Fredrikson, and Tomas Fur- mark. 2008. In a nervous voice: Acoustic analysis and perception of anxiety in social phobics speech. Journal of Nonverbal Behavior, 32(4):195.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Utterance-based audio sentiment analysis learned by a parallel combination of cnn and lstm", |
| "authors": [ |
| { |
| "first": "Ziqian", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| }, |
| { |
| "first": "Hua", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Feiyang", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.08065" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziqian Luo, Hua Xu, and Feiyang Chen. 2018. Utterance-based audio sentiment analysis learned by a parallel combination of cnn and lstm. arXiv preprint arXiv:1811.08065.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "The power of voice: Managerial affective states and future firm performance", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Mohan", |
| "middle": [], |
| "last": "Mayew", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Venkatachalam", |
| "suffix": "" |
| } |
| ], |
| "year": 2012, |
| "venue": "The Journal of Finance", |
| "volume": "67", |
| "issue": "1", |
| "pages": "1--43", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William J Mayew and Mohan Venkatachalam. 2012. The power of voice: Managerial affective states and future firm performance. The Journal of Finance, 67(1):1-43.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "A recursive algorithm for the forced alignment of very long audio segments", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Pedro", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Moreno", |
| "suffix": "" |
| }, |
| { |
| "first": "Jean-Manuel", |
| "middle": [], |
| "last": "Joerg", |
| "suffix": "" |
| }, |
| { |
| "first": "Oren", |
| "middle": [], |
| "last": "Van Thong", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Glickman", |
| "suffix": "" |
| } |
| ], |
| "year": 1998, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pedro J Moreno, Chris Joerg, Jean-Manuel Van Thong, and Oren Glickman. 1998. A recursive algorithm for the forced alignment of very long audio segments. In In proceedings of ICSLP.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Detecting risks in the banking system by sentiment analysis", |
| "authors": [ |
| { |
| "first": "Clemens", |
| "middle": [], |
| "last": "Nopp", |
| "suffix": "" |
| }, |
| { |
| "first": "Allan", |
| "middle": [], |
| "last": "Hanbury", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "591--600", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Clemens Nopp and Allan Hanbury. 2015. Detecting risks in the banking system by sentiment analysis. In In Proceedings of EMNLP, pages 591-600, Lisbon, Portugal.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Glove: Global vectors for word representation", |
| "authors": [ |
| { |
| "first": "Jeffrey", |
| "middle": [], |
| "last": "Pennington", |
| "suffix": "" |
| }, |
| { |
| "first": "Richard", |
| "middle": [], |
| "last": "Socher", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [ |
| "D" |
| ], |
| "last": "Manning", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1532--1543", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jeffrey Pennington, Richard Socher, and Christo- pher D. Manning. 2014. Glove: Global vectors for word representation. In In Proceedings of EMNLP, pages 1532-1543.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Context-dependent sentiment analysis in user-generated videos", |
| "authors": [ |
| { |
| "first": "Soujanya", |
| "middle": [], |
| "last": "Poria", |
| "suffix": "" |
| }, |
| { |
| "first": "Erik", |
| "middle": [], |
| "last": "Cambria", |
| "suffix": "" |
| }, |
| { |
| "first": "Devamanyu", |
| "middle": [], |
| "last": "Hazarika", |
| "suffix": "" |
| }, |
| { |
| "first": "Navonil", |
| "middle": [], |
| "last": "Majumder", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "873--883", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Soujanya Poria, Erik Cambria, Devamanyu Hazarika, Navonil Majumder, Amir Zadeh, and Louis-Philippe Morency. 2017. Context-dependent sentiment anal- ysis in user-generated videos. In In Proceedings of ACL, volume 1, pages 873-883.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Volatility prediction using financial disclosures sentiments with word embedding-based ir models", |
| "authors": [ |
| { |
| "first": "Navid", |
| "middle": [], |
| "last": "Rekabsaz", |
| "suffix": "" |
| }, |
| { |
| "first": "Mihai", |
| "middle": [], |
| "last": "Lupu", |
| "suffix": "" |
| }, |
| { |
| "first": "Artem", |
| "middle": [], |
| "last": "Baklanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Allan", |
| "middle": [], |
| "last": "Hanbury", |
| "suffix": "" |
| }, |
| { |
| "first": "Alexander", |
| "middle": [], |
| "last": "Duer", |
| "suffix": "" |
| }, |
| { |
| "first": "Linda", |
| "middle": [], |
| "last": "Anderson", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1712--1721", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Navid Rekabsaz, Mihai Lupu, Artem Baklanov, Al- lan Hanbury, Alexander Duer, and Linda Ander- son. 2017. Volatility prediction using financial dis- closures sentiments with word embedding-based ir models. In Proceedings of ACL, pages 1712-1721.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Self-reported symptoms of depression and ptsd are associated with reduced vowel space in screening interviews", |
| "authors": [ |
| { |
| "first": "Stefan", |
| "middle": [], |
| "last": "Scherer", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Gale", |
| "suffix": "" |
| }, |
| { |
| "first": "Jonathan", |
| "middle": [], |
| "last": "Lucas", |
| "suffix": "" |
| }, |
| { |
| "first": "Albert", |
| "middle": [ |
| "Skip" |
| ], |
| "last": "Gratch", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Rizzo", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE Transactions on Affective Computing", |
| "volume": "7", |
| "issue": "1", |
| "pages": "59--73", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Stefan Scherer, Gale M Lucas, Jonathan Gratch, Al- bert Skip Rizzo, and Louis-Philippe Morency. 2016. Self-reported symptoms of depression and ptsd are associated with reduced vowel space in screening in- terviews. IEEE Transactions on Affective Comput- ing, 7(1):59-73.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Paraverbal indicators of deception: A meta-analytic synthesis", |
| "authors": [ |
| { |
| "first": "Siegfried", |
| "middle": [ |
| "Ludwig" |
| ], |
| "last": "Sporer", |
| "suffix": "" |
| }, |
| { |
| "first": "Barbara", |
| "middle": [], |
| "last": "Schwandt", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Applied Cognitive Psychology: The Official Journal of the Society for Applied Research in Memory and Cognition", |
| "volume": "20", |
| "issue": "4", |
| "pages": "421--446", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siegfried Ludwig Sporer and Barbara Schwandt. 2006. Paraverbal indicators of deception: A meta-analytic synthesis. Applied Cognitive Psychology: The Offi- cial Journal of the Society for Applied Research in Memory and Cognition, 20(4):421-446.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Giving content to investor sentiment: The role of media in the stock market", |
| "authors": [ |
| { |
| "first": "C", |
| "middle": [], |
| "last": "Paul", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Tetlock", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Journal of Finance", |
| "volume": "62", |
| "issue": "3", |
| "pages": "1139--1168", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Paul C. Tetlock. 2007. Giving content to investor sen- timent: The role of media in the stock market. Jour- nal of Finance, 62(3):1139-1168.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Word embeddings-based uncertainty detection in financial disclosures", |
| "authors": [ |
| { |
| "first": "Sanja", |
| "middle": [], |
| "last": "Christoph Kilian Theil", |
| "suffix": "" |
| }, |
| { |
| "first": "Heiner", |
| "middle": [], |
| "last": "Stajner", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Stuckenschmidt", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proceedings of the First Workshop on Economics and Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "32--37", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Christoph Kilian Theil, Sanja Stajner, and Heiner Stuckenschmidt. 2018. Word embeddings-based un- certainty detection in financial disclosures. In In Proceedings of the First Workshop on Economics and Natural Language Processing, pages 32-37.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Combating human trafficking with multimodal deep models", |
| "authors": [ |
| { |
| "first": "Edmund", |
| "middle": [], |
| "last": "Tong", |
| "suffix": "" |
| }, |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Cara", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL", |
| "volume": "", |
| "issue": "", |
| "pages": "1547--1556", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Edmund Tong, Amir Zadeh, Cara Jones, and Louis- Philippe Morency. 2017. Combating human traf- ficking with multimodal deep models. In In Pro- ceedings of ACL, pages 1547-1556.", |
| "links": null |
| }, |
| "BIBREF29": { |
| "ref_id": "b29", |
| "title": "Financial keyword expansion via continuous word vector representations", |
| "authors": [ |
| { |
| "first": "Ming-Feng", |
| "middle": [], |
| "last": "Tsai", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuan-Ju", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of EMNLP", |
| "volume": "", |
| "issue": "", |
| "pages": "1453--1458", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ming-Feng Tsai and Chuan-Ju Wang. 2014. Financial keyword expansion via continuous word vector rep- resentations. In In Proceedings of EMNLP, pages 1453-1458.", |
| "links": null |
| }, |
| "BIBREF30": { |
| "ref_id": "b30", |
| "title": "A semiparametric gaussian copula regression model for predicting financial risks from earnings calls", |
| "authors": [ |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "William", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhenhao", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Hua", |
| "suffix": "" |
| } |
| ], |
| "year": 2014, |
| "venue": "Proceedings of ACL", |
| "volume": "1", |
| "issue": "", |
| "pages": "1155--1165", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "William Yang Wang and Zhenhao Hua. 2014. A semi- parametric gaussian copula regression model for predicting financial risks from earnings calls. In In Proceedings of ACL, volume 1, pages 1155-1165.", |
| "links": null |
| }, |
| "BIBREF31": { |
| "ref_id": "b31", |
| "title": "Image captioning with semantic attention", |
| "authors": [ |
| { |
| "first": "Quanzeng", |
| "middle": [], |
| "last": "You", |
| "suffix": "" |
| }, |
| { |
| "first": "Hailin", |
| "middle": [], |
| "last": "Jin", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaowen", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Chen", |
| "middle": [], |
| "last": "Fang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiebo", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the IEEE conference on computer vision and pattern recognition", |
| "volume": "", |
| "issue": "", |
| "pages": "4651--4659", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Quanzeng You, Hailin Jin, Zhaowen Wang, Chen Fang, and Jiebo Luo. 2016. Image captioning with seman- tic attention. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 4651-4659.", |
| "links": null |
| }, |
| "BIBREF32": { |
| "ref_id": "b32", |
| "title": "Mosi: multimodal corpus of sentiment intensity and subjectivity analysis in online opinion videos", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Zellers", |
| "suffix": "" |
| }, |
| { |
| "first": "Eli", |
| "middle": [], |
| "last": "Pincus", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1606.06259" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Zadeh, Rowan Zellers, Eli Pincus, and Louis- Philippe Morency. 2016a. Mosi: multimodal cor- pus of sentiment intensity and subjectivity anal- ysis in online opinion videos. arXiv preprint arXiv:1606.06259.", |
| "links": null |
| }, |
| "BIBREF33": { |
| "ref_id": "b33", |
| "title": "Multimodal sentiment intensity analysis in videos: Facial gestures and verbal messages", |
| "authors": [ |
| { |
| "first": "Amir", |
| "middle": [], |
| "last": "Zadeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Rowan", |
| "middle": [], |
| "last": "Zellers", |
| "suffix": "" |
| }, |
| { |
| "first": "Eli", |
| "middle": [], |
| "last": "Pincus", |
| "suffix": "" |
| }, |
| { |
| "first": "Louis-Philippe", |
| "middle": [], |
| "last": "Morency", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "IEEE Intelligent Systems", |
| "volume": "31", |
| "issue": "6", |
| "pages": "82--88", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Amir Zadeh, Rowan Zellers, Eli Pincus, and Louis- Philippe Morency. 2016b. Multimodal sentiment in- tensity analysis in videos: Facial gestures and verbal messages. IEEE Intelligent Systems, 31(6):82-88.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF1": { |
| "num": null, |
| "text": "The proposed Multimodal Deep Regression Model (MDRM). The inputs to the model is a company's conference call audio file with correpsonding transcript. Each conference call consists of N sentences. The output variable is a numerical value, i.e., the company's stock price volatility following the conference call.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "FIGREF2": { |
| "num": null, |
| "text": "The change of Mean Pitch around specific sentence. Sentence with number 0 is the corresponding Case1 and Case2 sentence described in the paper.", |
| "uris": null, |
| "type_str": "figure" |
| }, |
| "TABREF1": { |
| "num": null, |
| "html": null, |
| "content": "<table/>", |
| "text": "Comparison of Iterative Segmentation and One-Time Segmentation", |
| "type_str": "table" |
| } |
| } |
| } |
| } |