| { |
| "paper_id": "2021", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T14:15:18.466102Z" |
| }, |
| "title": "When does text prediction benefit from additional context? An exploration of contextual signals for chat and email messages", |
| "authors": [ |
| { |
| "first": "Stojan", |
| "middle": [], |
| "last": "Trajanovski", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Chad", |
| "middle": [], |
| "last": "Atalla", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "chatalla@microsoft.com" |
| }, |
| { |
| "first": "Microsoft", |
| "middle": [], |
| "last": "Kunho", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Kim", |
| "middle": [], |
| "last": "Microsoft", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| }, |
| { |
| "first": "Vipul", |
| "middle": [], |
| "last": "Agarwal", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "vipulag@microsoft.com" |
| }, |
| { |
| "first": "Microsoft", |
| "middle": [], |
| "last": "Milad", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "milads@microsoft.com" |
| }, |
| { |
| "first": "Shokouhi", |
| "middle": [], |
| "last": "Microsoft", |
| "suffix": "", |
| "affiliation": {}, |
| "email": "" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Email and chat communication tools are increasingly important for completing daily tasks. Accurate real-time phrase completion can save time and bolster productivity. Modern text prediction algorithms are based on large language models which typically rely on the prior words in a message to predict a completion. We examine how additional contextual signals (from previous messages, time, and subject) affect the performance of a commercial text prediction model. We compare contextual text prediction in chat and email messages from two of the largest commercial platforms Microsoft Teams and Outlook, finding that contextual signals contribute to performance differently between these scenarios. On emails, time context is most beneficial with small relative gains of 2% over baseline. Whereas, in chat scenarios, using a tailored set of previous messages as context yields relative improvements over the baseline between 9.3% and 18.6% across various critical serviceoriented text prediction metrics.", |
| "pdf_parse": { |
| "paper_id": "2021", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Email and chat communication tools are increasingly important for completing daily tasks. Accurate real-time phrase completion can save time and bolster productivity. Modern text prediction algorithms are based on large language models which typically rely on the prior words in a message to predict a completion. We examine how additional contextual signals (from previous messages, time, and subject) affect the performance of a commercial text prediction model. We compare contextual text prediction in chat and email messages from two of the largest commercial platforms Microsoft Teams and Outlook, finding that contextual signals contribute to performance differently between these scenarios. On emails, time context is most beneficial with small relative gains of 2% over baseline. Whereas, in chat scenarios, using a tailored set of previous messages as context yields relative improvements over the baseline between 9.3% and 18.6% across various critical serviceoriented text prediction metrics.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Email and chat communication tools are increasingly important for completing daily professional and personal tasks. Given the recent pandemic and shift to remote work, this usage has surged. The number of daily active users in Microsoft Teams, the largest business communication and chat platform, has increased from 20 million (2019, pre-pandemic) to more than 115 million in October (2020). On the other hand, email continues to be the crucial driver for formal communication showing ever increasing usage. Providing real-time suggestions for word or phrase auto-completions is known as text prediction. The efficiency of these communications is enhanced by suggesting highly accurate text predictions with low latency. Text prediction services have been deployed across popular communication tools and platforms such as (Microsoft Text Predictions, 2020) or GMail Smart Compose (Chen et al., 2019) .", |
| "cite_spans": [ |
| { |
| "start": 823, |
| "end": 857, |
| "text": "(Microsoft Text Predictions, 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 881, |
| "end": 900, |
| "text": "(Chen et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Modern text prediction algorithms are based on large language models and generally rely on the prefix of a message (characters typed until cursor position) to create predictions. We study to what extent additional contextual signals improve text predictions in chat and email messages in two of the largest commercial communication platforms: Microsoft Teams and Outlook. Our contributions are summarized as:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We demonstrate that prior-message context provides the greatest lift in the Teams (chat) scenario. A 5 minute window of prior messages from both senders works the best, with relative gains from 9.3% up to 18.6% across key metrics (total match and estimated characters accepted). This 5 minute window of prior messages from both senders outperforms the corresponding 2 and 10 minute scenarios.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We find that context about message composition time provides the largest gains for the Outlook (email) scenario, while adding the subject as context only marginally helps. These relative gains are moderate (2-3% across various metrics).", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "\u2022 We conclude that the different characteristics of chat and email messages impede domain transfer. The best contextual text prediction models are custom trained for each scenario, using the most impactful subset of contextual signals.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The remainder of the paper is organized as follows.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "We give an overview of state-of-the-art related research in Section 2. More details on the signals used for contextualization are provided in Section 3. Section 4 provides information on the language model, performance metrics, and statistical details about the data. Experiment results and comparisons are presented in Section 5. We conclude in Section 6. Ethical considerations on the data and processes are discussed in Section 7.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Text prediction services have been applied for various applications, including text editor (Darragh et al., 1990) , query autocompletion on search engine (Bast and Weber, 2006; Bar-Yossef and Kraus, 2011) , mobile virtual keyboard (Hard et al., 2018) . Recently prediction service is applied on communication tools for composing email and chat messages to improve user writing productivity (Kannan et al., 2016; Deb et al., 2019; Chen et al., 2019; Microsoft Text Predictions, 2020) .", |
| "cite_spans": [ |
| { |
| "start": 91, |
| "end": 113, |
| "text": "(Darragh et al., 1990)", |
| "ref_id": "BIBREF5" |
| }, |
| { |
| "start": 154, |
| "end": 176, |
| "text": "(Bast and Weber, 2006;", |
| "ref_id": "BIBREF1" |
| }, |
| { |
| "start": 177, |
| "end": 204, |
| "text": "Bar-Yossef and Kraus, 2011)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 231, |
| "end": 250, |
| "text": "(Hard et al., 2018)", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 390, |
| "end": 411, |
| "text": "(Kannan et al., 2016;", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 412, |
| "end": 429, |
| "text": "Deb et al., 2019;", |
| "ref_id": "BIBREF6" |
| }, |
| { |
| "start": 430, |
| "end": 448, |
| "text": "Chen et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 449, |
| "end": 482, |
| "text": "Microsoft Text Predictions, 2020)", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "To predict correct text continuation, such applications leverage efficient lookups with pregenerated candidates, using most popular candidates (MPC) (Bar-Yossef and Kraus, 2011) , or using large-scale language models (Bengio et al., 2003) . State-of-the-art language models (Jozefowicz et al., 2016; Mnih and Hinton, 2009; Melis et al., 2018) rely on the most recent deep learning architectures, including large LSTMs (Hochreiter and Schmidhuber, 1997) or transformers (Vaswani et al., 2017) , while prior approaches involve n-gram modeling (Kneser and Ney, 1995; James, 2000; Bickel et al., 2005) .", |
| "cite_spans": [ |
| { |
| "start": 149, |
| "end": 177, |
| "text": "(Bar-Yossef and Kraus, 2011)", |
| "ref_id": "BIBREF0" |
| }, |
| { |
| "start": 217, |
| "end": 238, |
| "text": "(Bengio et al., 2003)", |
| "ref_id": "BIBREF2" |
| }, |
| { |
| "start": 274, |
| "end": 299, |
| "text": "(Jozefowicz et al., 2016;", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 300, |
| "end": 322, |
| "text": "Mnih and Hinton, 2009;", |
| "ref_id": "BIBREF22" |
| }, |
| { |
| "start": 323, |
| "end": 342, |
| "text": "Melis et al., 2018)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 418, |
| "end": 452, |
| "text": "(Hochreiter and Schmidhuber, 1997)", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 469, |
| "end": 491, |
| "text": "(Vaswani et al., 2017)", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 541, |
| "end": 563, |
| "text": "(Kneser and Ney, 1995;", |
| "ref_id": "BIBREF18" |
| }, |
| { |
| "start": 564, |
| "end": 576, |
| "text": "James, 2000;", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 577, |
| "end": 597, |
| "text": "Bickel et al., 2005)", |
| "ref_id": "BIBREF3" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "In this work, we focus on the application of text prediction on production-level online communication tools, to help users compose emails (Chen et al., 2019; Microsoft Text Predictions, 2020) , and in addition chat messages. In particular, we focus on examining useful contextual signals to give more accurate predicted text, using time, subject, and prior messages. Various contextualization techniques (e.g., hierarchical RNNs) have been applied to add useful additional signals such as preceding web interaction, linking pages, similar search queries or visitor interests of a page (White et al., 2009) ; previous sequence of utterances (Park et al., 2018; Zhang et al., 2018; Yoo et al., 2020) or related text snippets (Ke et al., 2018) .", |
| "cite_spans": [ |
| { |
| "start": 138, |
| "end": 157, |
| "text": "(Chen et al., 2019;", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 158, |
| "end": 191, |
| "text": "Microsoft Text Predictions, 2020)", |
| "ref_id": null |
| }, |
| { |
| "start": 585, |
| "end": 605, |
| "text": "(White et al., 2009)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 640, |
| "end": 659, |
| "text": "(Park et al., 2018;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 660, |
| "end": 679, |
| "text": "Zhang et al., 2018;", |
| "ref_id": "BIBREF28" |
| }, |
| { |
| "start": 680, |
| "end": 697, |
| "text": "Yoo et al., 2020)", |
| "ref_id": "BIBREF27" |
| }, |
| { |
| "start": 723, |
| "end": 740, |
| "text": "(Ke et al., 2018)", |
| "ref_id": "BIBREF16" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Related work", |
| "sec_num": "2" |
| }, |
| { |
| "text": "We examine several signals accompanying the main message text: message compose time, subject, and previous messages. We combine these signals with the message body into a single \"contextualized\" string, using special tokens to separate signals, as shown in Figure 1a . This approach is inspired by (Chen et al., 2019) , as they showed that concatenating contextual signals into a single input string gave a comparable result to more complex methods encoding these signals separately 1 . The remainder of this section explains details about each contextual signal we use.", |
| "cite_spans": [ |
| { |
| "start": 298, |
| "end": 317, |
| "text": "(Chen et al., 2019)", |
| "ref_id": "BIBREF4" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 257, |
| "end": 266, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Time Composition time is a contextual signal which can provide added value for text prediction, enabling suggestions with relevant date-time words, like \"weekend\", \"tonight\". We encode local date and time, as shown in Figure 1a , and use <BOT> and <EOT> to separate from other signals.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 218, |
| "end": 227, |
| "text": "Figure 1a", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Subject Message subjects often contain the purpose or summarized information of a message. In the email scenario, we use subject as context. In the chat scenario, subject is not available, so we use the chat window name as a proxy for subject (can be auto-generated or manually set by users). In both cases, the subject context is wrapped with <BOU> and <EOU> special tokens.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Previous email messages Previous messages can provide valuable background information which influences the text of the current message being composed. In the email case, we create pairs of messages and replies. These pairs are concatenated with a <COT> special token to create a single contextual string. In cases where the email was the first in a thread, the prior email context is left blank.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Previous chat messages Prior message contextualization for chat scenario is much more complex. Chat conversations typically consist of many small messages sent in quick succession. Given the email and chat message length statistics in Section 4, we expect chat messages to be about 10\u00d7 smaller than emails. So, we limit chat histories to 20 messages, which is roughly equivalent to an email-reply pair in length. Among these prior messages, any number and any order could be from the current sender, or the other participant.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We segment chat histories by message blocks and time windows. A series of uninterrupted messages sent by one sender is considered as a single message block. Messages sent within the past N minutes are within a time window, which enforces recency as a proxy for relevance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We define three prior message context aggregation modes in the chat scenario (visualized in (iii) Both-Senders: chat messages from both senders, in the past N minutes. When the sender turn changes, strings are separated by a space or a special token <COT>.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "For each mode, we consider time windows of N = {2, 5, 10} minutes. Our model training depends on real messages from two of the largest commercial communication platforms Microsoft Teams and Outlook; this involves a multi-pronged system for ensuring our customers' privacy. We work within rigorous privacy rules, using tools with privacy features built in, and preprocessing all data through multiple privacy precautions before it is digested by our models. User data from our communication platforms is never visible to humans for analysis, in any raw or preprocessed format. We run this data through our pipelines and are only able to view resulting text prediction metrics. Section 7 contains more details about these privacy precautions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Contextualization concepts", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We sample Teams data from more than 3.8 billion curated one-on-one chat messages that span 6 months (say May -October 2020), followed by privacy precautions and noise filters. The data is sorted by time and split into train, validation, and test sets in non-overlapping time periods. We use over 90% of the data for training, holding out 75,000 samples for validation and 25,000 samples for testing. Each message is recorded in its respective dataset along with all associated context. In a statistical analysis of the chat message lengths (see Figure 2 , blue box) we find that mean tokens number is 9.15 (length in characters is 48), while median tokens number is 6 (with character length 31). Therefore, when iterating characterby-character through the messages, as done in inference for text predictions, the test set has over 1M evaluation points (resampled periodically, see Section 7.1).", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 545, |
| "end": 553, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Chat messages", |
| "sec_num": null |
| }, |
| { |
| "text": "In email experiments, we use approximately 150 million Outlook commercial emails from a period of 6 months, which go through the same privacy precautions mentioned above and in Section 7. The emails are then sorted, filtered for noise, and cut into train, validation, and test sets by their date ranges. A statistical analysis of email lengths (see Figure 2 , green box) reveals that mean number of tokens is 94 (with length in char-acters being 561), while the median is 53 tokens (and 316 characters). This is roughly 10\u00d7 larger than chat messages. When splitting train, test, and validation sets, over 90% of the data is allocated to the training set. The test set is subsampled to 3,000 emails (unlike the 25,000 messages for the chat test set) since this roughly leads to final contextualized datasets of the same size. Each resulting test set contains just over 1 million evaluation points, as in the chat setting. Additionally, we use the Avocado dataset as a publicly available dataset, which consists of emails from 279 accounts of a defunct IT company referred to as \"Avocado\" see details in (Oard et al., 2015) , for debugging and validation, allowing us to directly view data and outputs. This dataset is split into validation and test sets, each with roughly 3,000 emails for evaluation.", |
| "cite_spans": [ |
| { |
| "start": 1102, |
| "end": 1121, |
| "text": "(Oard et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 349, |
| "end": 357, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Email messages", |
| "sec_num": null |
| }, |
| { |
| "text": "When applying the chat-specific prior-message grouping modes defined in Section 3, the number of prior messages fetched as context varies. Table 1 presents details on how many messages the different aggregation modes end up grouping. Both single-sender modes introduce smaller volumes of context than the Both-Senders mode. For example, the amount of prior messages grouped in the 5 minutes Ignore-Blocks mode is similar to the 2 minutes Both-Senders mode; where 2.5 chat messages are combined on average, and 56-59% of chat messages have at least one message as context. For emails, only around 50% have prior email context.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Prior-message aggregation statistics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "The number of tokens per contextualized message (including current and aggregated prior messages) varies between the email scenario and various aggregation modes in the chat scenario. Figure 2 provides statistics on these aggregated message lengths. In the chat case, the Both-Senders mode with a 10 minute time window results in the largest aggregate length, with a median around 27 tokens, and mean above 40 tokens. The Respect-Blocks mode does not show significant length increases as the time window grows, due to the message block boundary limits. For emails, the median total tokens remains similar regardless of including the previous message. This is because half of emails are not part of an email-reply pair.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 184, |
| "end": 190, |
| "text": "Figure", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Prior-message aggregation statistics", |
| "sec_num": "4.2" |
| }, |
| { |
| "text": "Once the message data is preprocessed and jointly encoded with contextual signals, it is passed as an input to the Language Model. The production system uses a two-layer (550, 550) LSTM (with 6000 sampled softmax size loss) which is optimized to maximize the Estimated Characters Accepted metric (described in Section 5.1). All contextualization experiments use the production model architecture as the baseline. Both baseline and contextual models are trained on 16 GPUs.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language model", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We have conducted experiments with more complex language models (e.g., transformers, deeper LSTMs), but we use the production model in this paper as (i) its simpler architecture enables largescale low-latency text prediction serving and (ii) the goal of this work is to explore how different contextual signals add to the baseline performance.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Language model", |
| "sec_num": "4.3" |
| }, |
| { |
| "text": "We conduct experiments for both email and chat messages with individual contextual signals (time, subject, prior messages) and combinations of those.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments and results", |
| "sec_num": "5" |
| }, |
| { |
| "text": "In all experiments, we level the Suggestion Rate (SR) (number of suggestions per message), then evaluate model variant performance against the following text prediction metrics:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Metrics", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "\u2022 MR: Match Rate is the ratio of the number of matched suggestions and the total number of generated suggestions. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Performance Metrics", |
| "sec_num": "5.1" |
| }, |
| { |
| "text": "The performance results for chat messages from Microsoft Teams compared to the non-contextual baseline model are shown in Table 2 . For comparability, we train the model's confidence threshold to level each model's suggestion rate (SR) at 0.5 suggestions / message. Contextualization with just the chat window name (subject) yields moderate gains, possibly because the typically short chat messages are so sparse on context that a chat topic name, or participant names from a chat header, provides a starting foothold for relevance. In contrast, from the last table rows, we see that the benefits from subject context diminish once prior messages are used as a context, suggesting that the subject proxy is much weaker than prior message context. Table 2 also shows that compose-time can act as a confounding context signal for chat messages, especially in experiments with no prior messages as a context. This is possibly due to the numerically-heavy time encoding confusing the model in contrast to the short text of chat messages. The experiments also show that the benefits of these contextual signals are not additive.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 122, |
| "end": 129, |
| "text": "Table 2", |
| "ref_id": "TABREF3" |
| }, |
| { |
| "start": 747, |
| "end": 759, |
| "text": "Table 2 also", |
| "ref_id": "TABREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments with chat messages", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "All three prior message aggregation modes (Ignore-Blocks, Respect-Blocks, and Both-Senders) show gains across all performance metrics, with all time window sizes. Both-Senders mode achieves the most significant relative gains: above 9.3% for Match Rate and the Total Matches; more than 11.7% for the character match and character match per suggestion; and more than 18.6% for the characters saved per suggestion and character acceptance. This indicates that messages from the other sender provide significant value, when used with a welltuned time window. It provides relevant conversation context from all senders, eliminating confusing gaps between messages, and enables suggestions in response to questions posed by the other sender. In particular, the Ignore-Blocks mode does worse than Both-Senders, since Ignore-Blocks can violate conversation continuity, including messages [k, k + 2] from the current sender, and skipping message k + 1 from the other sender.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments with chat messages", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "For the single-sender modes, Respect-Blocks generally performs slightly worse as it utilizes only part of the messages taken by the Ignore-Blocks mode. This indicates that seeing a longer prefix of the current message block (more similar to writing a long email) makes an impact on text prediction in chat messages. Lastly, we observe that a 5 minute time window works better than 2 and 10 minute time windows. Shorter time windows seem to miss important prior context while a larger windows lead Table 4 : Avocado test set (Oard et al., 2015) messages experiment results for various contextualization modes. First column is experiment configuration, other columns are relative gains, over the noncontextual baseline, of performance metrics (Section 5.1) with a leveled suggestion rate of 2.5.", |
| "cite_spans": [ |
| { |
| "start": 524, |
| "end": 543, |
| "text": "(Oard et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 497, |
| "end": 504, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments with chat messages", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "to over-saturation of irrelevant information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Experiments with chat messages", |
| "sec_num": "5.2" |
| }, |
| { |
| "text": "The gains from the contextualization in email messages are more moderate compared to those from chat messages. The comparison of the contextualized models with the baseline on commercial Microsoft Outlook emails and Avocado dataset are given in Table 3 and 4 respectively. For emails, the results suggest that time as a context (or time+subject in the Avocado dataset) offers most promising relative gains of 2-3%. This contrasts the observed trend from chat messages. Time is more important for emails since emails are often longer, contain greetings, farewells, and meeting requests with time-related keywords (e.g., \"tomorrow\", \"last night\", \"after the weekend\"). Additionally, numerical tokens from the time context are less likely to outnumber the message content tokens, since emails are about 10\u00d7longer than chat messages. With the chosen architecture, neither subject nor prior message context signals provide value in the email scenario. Subjects may introduce keywords, but the implemented method of encoding context and body into a single string did not demonstrate an ability to pull out those key words for suggestions. Likewise, prior message context did not benefit the email scenario. As Figure 2 shows, emails with prior messages are significantly longer than any of the chat context aggregations. Prior emails may have critical information steering the direction of an email thread, but our production-oriented metric are not significantly affected. The implemented architecture may not be strong enough to isolate and make use of those cues, instead becoming confounded by the vast influx of tokens from another sender. This emphasizes that the email and chat scenarios require different context signals, and may benefit from different underlying architectures.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 245, |
| "end": 252, |
| "text": "Table 3", |
| "ref_id": "TABREF5" |
| }, |
| { |
| "start": 1204, |
| "end": 1212, |
| "text": "Figure 2", |
| "ref_id": "FIGREF3" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments with email messages", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "Qualitative analysis with the Avocado set Given our commercial data-visibility constraints due to the privacy considerations, we perform a qualitative analysis on the public Avocado dataset (Oard et al., 2015) . Using this public data, we evaluate text predictions from one of the promising email context modes: time context. As shown in Table 5 , we use diff tools to identify when the time context model and baseline model create (i) correct suggestions, (ii) wrong suggestions, and (iii) no suggestions. We see that the time context model improves on all three columns. When directly examining cases where the time-context model renders a new correct suggestion, compared to the baseline, we observe a trend of timerelated n-grams. Words like \"tomorrow\", \"available\", \"September\" are seen more frequently in correct suggestions (see Figure 3) . The same trend is also observed in the Time+Subject model. Table 5 : Comparing text predictions of time-context model vs baselines. \"Context win\" row holds counts of cases where contextual model suggestions beat baseline suggestions.", |
| "cite_spans": [ |
| { |
| "start": 190, |
| "end": 209, |
| "text": "(Oard et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 338, |
| "end": 345, |
| "text": "Table 5", |
| "ref_id": null |
| }, |
| { |
| "start": 836, |
| "end": 845, |
| "text": "Figure 3)", |
| "ref_id": null |
| }, |
| { |
| "start": 907, |
| "end": 914, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Experiments with email messages", |
| "sec_num": "5.3" |
| }, |
| { |
| "text": "We study the role of context in text prediction for chat and email platforms. Testing with previous messages, subject, time as additional contextual Figure 3 : The 20 most common new suggestions triggered by the time-context model, on data points from the Avocado test set (Oard et al., 2015) where the baseline renders zero suggestions. signals, we find that the different characteristics of emails and chat messages influence the selection of contextual signals to use. Previous message contextualization leads to significant gains for chat messages from Microsoft Teams, when using an appropriate message aggregation strategy. By using a 5 minute time window and messages from both senders, we see a 9.4% relative increase in the match rate, and an 18.6% relative gain on estimated characters accepted. Chat messages are often short and can lack context about a train of thought; previous messages can bring necessary semantics to the model to provide a correct prediction. Benefits are comparatively insignificant for subject and compose time as contextual signals in chat messages. In the email scenario based on Microsoft Outlook, we find that time as a contextual signal yields the largest boost with a 2.02% relative increase on the match rate, while subject only helps in conjunction with time, and prior messages yields no improvement. More complex models may be needed to reap subject and prior message gains for emails, but the current architecture was chosen for largescale serving latency.", |
| "cite_spans": [ |
| { |
| "start": 273, |
| "end": 292, |
| "text": "(Oard et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 149, |
| "end": 157, |
| "text": "Figure 3", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "Future work involves exploring different encodings for contextual signals, such as utilizing hierarchical RNNs (Park et al., 2018; Yoo et al., 2020) to better capture context, or using more advanced architectures such as transformers or GPT-3.", |
| "cite_spans": [ |
| { |
| "start": 111, |
| "end": 130, |
| "text": "(Park et al., 2018;", |
| "ref_id": "BIBREF24" |
| }, |
| { |
| "start": 131, |
| "end": 148, |
| "text": "Yoo et al., 2020)", |
| "ref_id": "BIBREF27" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusions", |
| "sec_num": "6" |
| }, |
| { |
| "text": "When working with sensitive data and running a service which generates text predictions for the general public, we are responsible for preserving user privacy and serving fair and inclusive suggestions. ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Ethical Considerations", |
| "sec_num": "7" |
| }, |
| { |
| "text": "Our service framework follows the regulatory requirements of internal company-wide standards and General Data Protection Regulation (GDPR) (2018) to meet the user privacy regulations and customer premises. All customer chat and email data, from Teams and Outlook, used in this work are classified as customer content, which is not visible to humans for any purpose. Only system byproduct data, which is not linkable to specific users or groups, is obtained and viewed for quantitative evaluation. This includes internal service logs or numerical metrics (shown in Section 5.1). We also regularly re-sample training and test data due to our privacy and data retention policies, preserving similar data set sizes. We strictly use only publicly available data, such as the Avocado dataset (Oard et al., 2015), for debugging and visible qualitative evaluation.", |
| "cite_spans": [ |
| { |
| "start": 786, |
| "end": 805, |
| "text": "(Oard et al., 2015)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Privacy considerations on user data", |
| "sec_num": "7.1" |
| }, |
| { |
| "text": "In pursuit of fair, respectful, and responsible suggestions, we employ a blocklist. This blocklist step in our text prediction system consists of a large dictionary containing denigrative, offensive, controversial, sensitive, and stereotype-prone words and phrases. Text from the message body and contextual signals serves as input to the blocklist. Then, if any word or phrase from the blocklist is found in the input, all further suggestions are suppressed for the message.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Blocklisting", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "In the email scenario, the full body and context is used for blocklist checks, resulting in a blocklist trigger rate of 47.42%. This means that 47.42% of our data points contain a blocklisted term in their input text, and we avoid triggering suggestions on those points. Naturally, this rate of blocklist triggering increases as more context is added to the pool of text being checked.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Blocklisting", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "This phenomenon introduces an added complexity to the chat scenario. A noncontextual baseline chat model would fail to trigger the blocklist on a response to an offensive statement from two messages ago. Figure 4 shows how the blocklist trigger rate varies as larger windows of chat history are used as context. We ensure that all chat models check the past 5 messages against the blocklist, no matter how many prior messages are used for text prediction inference. With 5 prior messages fed to the blocklist in chat conversations, the blocklist trigger rate is 25.08%, instead of 5.89% with no added prior messages.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 212, |
| "text": "Figure 4", |
| "ref_id": "FIGREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Blocklisting", |
| "sec_num": "7.2" |
| }, |
| { |
| "text": "They also use subject and previous email as contexts.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "Based on observed acceptance probabilities on large-scale production traffic, users tend to accept longer suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [ |
| { |
| "text": "We would like to thank the members of Microsoft Search, Assistant and Intelligence (MSAI) group for their useful comments and suggestions.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Acknowledgements", |
| "sec_num": null |
| } |
| ], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "Contextsensitive query auto-completion", |
| "authors": [ |
| { |
| "first": "Ziv", |
| "middle": [], |
| "last": "Bar", |
| "suffix": "" |
| }, |
| { |
| "first": "-Yossef", |
| "middle": [], |
| "last": "", |
| "suffix": "" |
| }, |
| { |
| "first": "Naama", |
| "middle": [], |
| "last": "Kraus", |
| "suffix": "" |
| } |
| ], |
| "year": 2011, |
| "venue": "Proc. of the 20th Intl. Conf. on World Wide Web (WWW)", |
| "volume": "", |
| "issue": "", |
| "pages": "107--116", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1963405.1963424" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ziv Bar-Yossef and Naama Kraus. 2011. Context- sensitive query auto-completion. In Proc. of the 20th Intl. Conf. on World Wide Web (WWW), pages 107- 116.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Type less, find more: fast autocompletion search with a succinct index", |
| "authors": [ |
| { |
| "first": "Holger", |
| "middle": [], |
| "last": "Bast", |
| "suffix": "" |
| }, |
| { |
| "first": "Ingmar", |
| "middle": [], |
| "last": "Weber", |
| "suffix": "" |
| } |
| ], |
| "year": 2006, |
| "venue": "Proc. of the 29th Annual Intl. ACM Conf. on Research and Development in Information Retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "364--371", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1148170.1148234" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Holger Bast and Ingmar Weber. 2006. Type less, find more: fast autocompletion search with a succinct in- dex. In Proc. of the 29th Annual Intl. ACM Conf. on Research and Development in Information Retrieval (SIGIR), pages 364-371.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "A neural probabilistic language model", |
| "authors": [ |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "R\u00e9jean", |
| "middle": [], |
| "last": "Ducharme", |
| "suffix": "" |
| }, |
| { |
| "first": "Pascal", |
| "middle": [], |
| "last": "Vincent", |
| "suffix": "" |
| }, |
| { |
| "first": "Christian", |
| "middle": [], |
| "last": "Janvin", |
| "suffix": "" |
| } |
| ], |
| "year": 2003, |
| "venue": "Jour. of Machine Learning Research", |
| "volume": "3", |
| "issue": "", |
| "pages": "1137--1155", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/10.5555/944919.944966" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yoshua Bengio, R\u00e9jean Ducharme, Pascal Vincent, and Christian Janvin. 2003. A neural probabilistic lan- guage model. Jour. of Machine Learning Research, 3:1137-1155.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Learning to complete sentences", |
| "authors": [ |
| { |
| "first": "Steffen", |
| "middle": [], |
| "last": "Bickel", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Haider", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Scheffer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "European Conf. on Machine Learning (ECML)", |
| "volume": "", |
| "issue": "", |
| "pages": "497--504", |
| "other_ids": { |
| "DOI": [ |
| "https://link.springer.com/chapter/10.1007/11564096_47" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Steffen Bickel, Peter Haider, and Tobias Scheffer. 2005. Learning to complete sentences. In Euro- pean Conf. on Machine Learning (ECML), pages 497-504. Springer.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Gmail Smart Compose: Real-time Assisted Writing", |
| "authors": [ |
| { |
| "first": "Mia", |
| "middle": [], |
| "last": "Xu Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "N", |
| "middle": [], |
| "last": "Benjamin", |
| "suffix": "" |
| }, |
| { |
| "first": "Gagan", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Yuan", |
| "middle": [], |
| "last": "Bansal", |
| "suffix": "" |
| }, |
| { |
| "first": "Shuyuan", |
| "middle": [], |
| "last": "Cao", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jackie", |
| "middle": [], |
| "last": "Lu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinan", |
| "middle": [], |
| "last": "Tsay", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "M", |
| "middle": [], |
| "last": "Andrew", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhifeng", |
| "middle": [], |
| "last": "Dai", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. of the 25th ACM SIGKDD Intl. Conf. on Knowledge Discovery & Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "2287--2295", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/3292500.3330723" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mia Xu Chen, Benjamin N Lee, Gagan Bansal, Yuan Cao, Shuyuan Zhang, Justin Lu, Jackie Tsay, Yinan Wang, Andrew M Dai, Zhifeng Chen, et al. 2019. Gmail Smart Compose: Real-time Assisted Writing. In Proc. of the 25th ACM SIGKDD Intl. Conf. on Knowledge Discovery & Data Mining, pages 2287- 2295.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "The reactive keyboard: A predictive typing aid", |
| "authors": [ |
| { |
| "first": "John", |
| "middle": [ |
| "J" |
| ], |
| "last": "Darragh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ian", |
| "middle": [ |
| "H" |
| ], |
| "last": "Witten", |
| "suffix": "" |
| }, |
| { |
| "first": "Mark", |
| "middle": [ |
| "L" |
| ], |
| "last": "James", |
| "suffix": "" |
| } |
| ], |
| "year": 1990, |
| "venue": "Computer", |
| "volume": "23", |
| "issue": "11", |
| "pages": "41--49", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/2.60879" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "John J. Darragh, Ian H. Witten, and Mark L. James. 1990. The reactive keyboard: A predictive typing aid. Computer, 23(11):41-49.", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Diversifying reply suggestions using a matching-conditional variational autoencoder", |
| "authors": [ |
| { |
| "first": "Budhaditya", |
| "middle": [], |
| "last": "Deb", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Bailey", |
| "suffix": "" |
| }, |
| { |
| "first": "Milad", |
| "middle": [], |
| "last": "Shokouhi", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proc. of Conf. of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "40--47", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/n19-2006" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Budhaditya Deb, Peter Bailey, and Milad Shokouhi. 2019. Diversifying reply suggestions using a matching-conditional variational autoencoder. In Proc. of Conf. of the North American Chapter of the Association for Computational Linguistics: Hu- man Language Technologies (NAACL-HLT), pages 40-47. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "EU data protection rules", |
| "authors": [], |
| "year": 2018, |
| "venue": "European Commission", |
| "volume": "", |
| "issue": "6", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "European Commission. 2018. EU data protection rules. https://ec.europa.eu/info/law/law-topic/data-protection/eu-data-protection-rules_en. Online; accessed 6 January 2021.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Federated learning for mobile keyboard prediction", |
| "authors": [ |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Hard", |
| "suffix": "" |
| }, |
| { |
| "first": "Kanishka", |
| "middle": [], |
| "last": "Rao", |
| "suffix": "" |
| }, |
| { |
| "first": "Rajiv", |
| "middle": [], |
| "last": "Mathews", |
| "suffix": "" |
| }, |
| { |
| "first": "Swaroop", |
| "middle": [], |
| "last": "Ramaswamy", |
| "suffix": "" |
| }, |
| { |
| "first": "Fran\u00e7oise", |
| "middle": [], |
| "last": "Beaufays", |
| "suffix": "" |
| }, |
| { |
| "first": "Sean", |
| "middle": [], |
| "last": "Augenstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Hubert", |
| "middle": [], |
| "last": "Eichner", |
| "suffix": "" |
| }, |
| { |
| "first": "Chlo\u00e9", |
| "middle": [], |
| "last": "Kiddon", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Ramage", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1811.03604" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andrew Hard, Kanishka Rao, Rajiv Mathews, Swaroop Ramaswamy, Fran\u00e7oise Beaufays, Sean Augenstein, Hubert Eichner, Chlo\u00e9 Kiddon, and Daniel Ramage. 2018. Federated learning for mobile keyboard pre- diction. arXiv:1811.03604.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Long short-term memory", |
| "authors": [ |
| { |
| "first": "Sepp", |
| "middle": [], |
| "last": "Hochreiter", |
| "suffix": "" |
| }, |
| { |
| "first": "J\u00fcrgen", |
| "middle": [], |
| "last": "Schmidhuber", |
| "suffix": "" |
| } |
| ], |
| "year": 1997, |
| "venue": "Neural Computation", |
| "volume": "9", |
| "issue": "8", |
| "pages": "1735--1780", |
| "other_ids": { |
| "DOI": [ |
| "10.1162/neco.1997.9.8.1735" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sepp Hochreiter and J\u00fcrgen Schmidhuber. 1997. Long short-term memory. Neural Computation, 9(8):1735-1780.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Modified kneser-ney smoothing of n-gram models", |
| "authors": [ |
| { |
| "first": "Frankie", |
| "middle": [ |
| "James" |
| ], |
| "last": "", |
| "suffix": "" |
| } |
| ], |
| "year": 2000, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Frankie James. 2000. Modified kneser-ney smoothing of n-gram models. Technical report, RIACS.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "5 attributes of successful teams", |
| "authors": [ |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Spataro", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jared Spataro. 2019. 5 attributes of successful teams. https://www.microsoft.com/en-us/microsoft-365/blog/2019/11/19/5-attributes-successful-teams/. Online; accessed 6 January 2021.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Microsoft Teams reaches 115 million DAU-plus, a new daily collaboration minutes metric for Microsoft 365", |
| "authors": [ |
| { |
| "first": "Jared", |
| "middle": [], |
| "last": "Spataro", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Jared Spataro. 2020. Microsoft Teams reaches 115 million DAU-plus, a new daily collaboration minutes metric for Microsoft 365. https://www.microsoft.com/en-us/microsoft-365/blog/2020/10/28/microsoft-teams-reaches-115-million-dau-plus-a-new-daily-collaboration-minutes-metric-for-microsoft-365/.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Exploring the limits of language modeling", |
| "authors": [ |
| { |
| "first": "Rafal", |
| "middle": [], |
| "last": "Jozefowicz", |
| "suffix": "" |
| }, |
| { |
| "first": "Oriol", |
| "middle": [], |
| "last": "Vinyals", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Schuster", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Yonghui", |
| "middle": [], |
| "last": "Wu", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1602.02410" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Rafal Jozefowicz, Oriol Vinyals, Mike Schuster, Noam Shazeer, and Yonghui Wu. 2016. Exploring the lim- its of language modeling. arXiv:1602.02410.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Smart reply: Automated response suggestion for email", |
| "authors": [ |
| { |
| "first": "Anjuli", |
| "middle": [], |
| "last": "Kannan", |
| "suffix": "" |
| }, |
| { |
| "first": "Karol", |
| "middle": [], |
| "last": "Kurach", |
| "suffix": "" |
| }, |
| { |
| "first": "Sujith", |
| "middle": [], |
| "last": "Ravi", |
| "suffix": "" |
| }, |
| { |
| "first": "Tobias", |
| "middle": [], |
| "last": "Kaufman", |
| "suffix": "" |
| }, |
| { |
| "first": "Balint", |
| "middle": [], |
| "last": "Miklos", |
| "suffix": "" |
| }, |
| { |
| "first": "Greg", |
| "middle": [], |
| "last": "Corrado", |
| "suffix": "" |
| }, |
| { |
| "first": "Andrew", |
| "middle": [], |
| "last": "Tomkins", |
| "suffix": "" |
| }, |
| { |
| "first": "Laszlo", |
| "middle": [], |
| "last": "Lukacs", |
| "suffix": "" |
| }, |
| { |
| "first": "Marina", |
| "middle": [], |
| "last": "Ganea", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| }, |
| { |
| "first": "Vivek", |
| "middle": [], |
| "last": "Ramavajjala", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proc. of the ACM SIGKDD Conf. on Knowledge Discovery and Data Mining (KDD)", |
| "volume": "", |
| "issue": "", |
| "pages": "955--964", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/2939672.2939801" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anjuli Kannan, Karol Kurach, Sujith Ravi, Tobias Kaufman, Balint Miklos, Greg Corrado, Andrew Tomkins, Laszlo Lukacs, Marina Ganea, Peter Young, and Vivek Ramavajjala. 2016. Smart reply: Automated response suggestion for email. In Proc. of the ACM SIGKDD Conf. on Knowledge Discovery and Data Mining (KDD), page 955-964.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Focused hierarchical RNNs for conditional sequence processing", |
| "authors": [ |
| { |
| "first": "Nan", |
| "middle": [ |
| "Rosemary" |
| ], |
| "last": "Ke", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Konrad\u017co\u0142na", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhouhan", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Lin", |
| "suffix": "" |
| }, |
| { |
| "first": "Yoshua", |
| "middle": [], |
| "last": "Trischler", |
| "suffix": "" |
| }, |
| { |
| "first": "Joelle", |
| "middle": [], |
| "last": "Bengio", |
| "suffix": "" |
| }, |
| { |
| "first": "Laurent", |
| "middle": [], |
| "last": "Pineau", |
| "suffix": "" |
| }, |
| { |
| "first": "Christopher", |
| "middle": [], |
| "last": "Charlin", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Pal", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. of the", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Nan Rosemary Ke, Konrad\u017bo\u0142na, Alessandro Sor- doni, Zhouhan Lin, Adam Trischler, Yoshua Ben- gio, Joelle Pineau, Laurent Charlin, and Christopher Pal. 2018. Focused hierarchical RNNs for condi- tional sequence processing. In Proc. of the 35th", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "on Machine Learning (ICML)", |
| "authors": [ |
| { |
| "first": "", |
| "middle": [], |
| "last": "Intl", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Conf", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "80", |
| "issue": "", |
| "pages": "2554--2563", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Intl. Conf. on Machine Learning (ICML), volume 80, pages 2554-2563, Stockholm, Sweden.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Improved backing-off for m-gram language modeling", |
| "authors": [ |
| { |
| "first": "Reinhard", |
| "middle": [], |
| "last": "Kneser", |
| "suffix": "" |
| }, |
| { |
| "first": "Hermann", |
| "middle": [], |
| "last": "Ney", |
| "suffix": "" |
| } |
| ], |
| "year": 1995, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1109/ICASSP.1995.479394" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Reinhard Kneser and Hermann Ney. 1995. Improved backing-off for m-gram language modeling. In Intl.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Conf. on Acoustics, Speech, and Signal Processing", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "1", |
| "issue": "", |
| "pages": "181--184", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Conf. on Acoustics, Speech, and Signal Processing (ICASSP), volume 1, pages 181-184.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "On the state of the art of evaluation in neural language models", |
| "authors": [ |
| { |
| "first": "G\u00e1bor", |
| "middle": [], |
| "last": "Melis", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Dyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Phil", |
| "middle": [], |
| "last": "Blunsom", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "6th Intl. Conf. on Learning Representations (ICLR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "G\u00e1bor Melis, Chris Dyer, and Phil Blunsom. 2018. On the state of the art of evaluation in neural language models. In 6th Intl. Conf. on Learning Representa- tions (ICLR), Vancouver, BC, Canada.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "Write faster using text predictions in Word, Outlook", |
| "authors": [], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Microsoft Text Predictions. 2020. Write faster using text predictions in Word, Outlook. https://insider.office.com/en-us/blog/text-predictions-in-word-outlook. Online; accessed 7 April 2021.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "A scalable hierarchical distributed language model", |
| "authors": [ |
| { |
| "first": "Andriy", |
| "middle": [], |
| "last": "Mnih", |
| "suffix": "" |
| }, |
| { |
| "first": "Geoffrey", |
| "middle": [ |
| "E" |
| ], |
| "last": "Hinton", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "1081--1088", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/10.5555/2981780.2981915" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Andriy Mnih and Geoffrey E Hinton. 2009. A scal- able hierarchical distributed language model. In Ad- vances in Neural Information Processing Systems (NeurIPS), pages 1081-1088.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Avocado research email collection LDC2015T03. Philadelphia: Linguistic Data Consortium", |
| "authors": [ |
| { |
| "first": "Douglas", |
| "middle": [], |
| "last": "Oard", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Webber", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "A" |
| ], |
| "last": "Kirsch", |
| "suffix": "" |
| }, |
| { |
| "first": "Sergey", |
| "middle": [], |
| "last": "Golitsynskiy", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.35111/wqt6-jg60" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Douglas Oard, William Webber, David A. Kirsch, and Sergey Golitsynskiy. 2015. Avocado research email collection LDC2015T03. Philadelphia: Linguistic Data Consortium.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "A hierarchical latent structure for variational conversation modeling", |
| "authors": [ |
| { |
| "first": "Yookoon", |
| "middle": [], |
| "last": "Park", |
| "suffix": "" |
| }, |
| { |
| "first": "Jaemin", |
| "middle": [], |
| "last": "Cho", |
| "suffix": "" |
| }, |
| { |
| "first": "Gunhee", |
| "middle": [], |
| "last": "Kim", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. of the Conf. of the North American Chapter of the Association for Computational Linguistics: Human Language Technologies (NAACL-HLT)", |
| "volume": "", |
| "issue": "", |
| "pages": "1792--1801", |
| "other_ids": { |
| "DOI": [ |
| "10.18653/v1/N18-1162" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yookoon Park, Jaemin Cho, and Gunhee Kim. 2018. A hierarchical latent structure for variational conversa- tion modeling. In Proc. of the Conf. of the North American Chapter of the Association for Computa- tional Linguistics: Human Language Technologies (NAACL-HLT), pages 1792-1801, New Orleans, LA, USA. Association for Computational Linguistics.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "Attention is all you need", |
| "authors": [ |
| { |
| "first": "Ashish", |
| "middle": [], |
| "last": "Vaswani", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Niki", |
| "middle": [], |
| "last": "Parmar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jakob", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Llion", |
| "middle": [], |
| "last": "Jones", |
| "suffix": "" |
| }, |
| { |
| "first": "Aidan", |
| "middle": [ |
| "N" |
| ], |
| "last": "Gomez", |
| "suffix": "" |
| }, |
| { |
| "first": "\u0141ukasz", |
| "middle": [], |
| "last": "Kaiser", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Advances in Neural Information Processing Systems (NeurIPS)", |
| "volume": "", |
| "issue": "", |
| "pages": "5998--6008", |
| "other_ids": { |
| "DOI": [ |
| "https://dl.acm.org/doi/10.5555/3295222.3295349" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ashish Vaswani, Noam Shazeer, Niki Parmar, Jakob Uszkoreit, Llion Jones, Aidan N Gomez, \u0141ukasz Kaiser, and Illia Polosukhin. 2017. Attention is all you need. In Advances in Neural Information Pro- cessing Systems (NeurIPS), pages 5998-6008.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Predicting user interests from contextual information", |
| "authors": [ |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Ryen", |
| "suffix": "" |
| }, |
| { |
| "first": "P", |
| "middle": [], |
| "last": "White", |
| "suffix": "" |
| }, |
| { |
| "first": "Liwei", |
| "middle": [], |
| "last": "Bailey", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| } |
| ], |
| "year": 2009, |
| "venue": "Proc. of the 32nd Intl. ACM Conf. on Research and development in information retrieval (SIGIR)", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "DOI": [ |
| "10.1145/1571941.1572005" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ryen W. White, P. Bailey, and Liwei Chen. 2009. Pre- dicting user interests from contextual information. In Proc. of the 32nd Intl. ACM Conf. on Research and development in information retrieval (SIGIR).", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Variational hierarchical dialog autoencoder for dialogue state tracking data augmentation", |
| "authors": [ |
| { |
| "first": "Hanbit", |
| "middle": [], |
| "last": "Kang Min Yoo", |
| "suffix": "" |
| }, |
| { |
| "first": "Franck", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Trung", |
| "middle": [], |
| "last": "Dernoncourt", |
| "suffix": "" |
| }, |
| { |
| "first": "W", |
| "middle": [], |
| "last": "Bui", |
| "suffix": "" |
| }, |
| { |
| "first": "Sang-Goo", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proc. of the Conf. on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kang Min Yoo, Hanbit Lee, Franck Dernoncourt, Trung Bui, W. Chang, and Sang-goo Lee. 2020. Variational hierarchical dialog autoencoder for di- alogue state tracking data augmentation. In Proc. of the Conf. on Empirical Methods in Natural Lan- guage Processing (EMNLP).", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Modeling multiturn conversation with deep utterance aggregation", |
| "authors": [ |
| { |
| "first": "Zhuosheng", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jiangtong", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Pengfei", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hai", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Gongshen", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2018, |
| "venue": "Proc. of the 27th Intl. Conf. on Computational Linguistics (COLING)", |
| "volume": "", |
| "issue": "", |
| "pages": "3740--3752", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Zhuosheng Zhang, Jiangtong Li, Pengfei Zhu, Hai Zhao, and Gongshen Liu. 2018. Modeling multi- turn conversation with deep utterance aggregation. In Proc. of the 27th Intl. Conf. on Computational Linguistics (COLING), pages 3740-3752, Santa Fe, New Mexico, USA. Association for Computational Linguistics.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "(a) Context extraction and encoding.(b) Aggregating a 5 min prior chat window in various context modes." |
| }, |
| "FIGREF1": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Examples of (a) context encoding pipeline and (b) chat prior message aggregation modes." |
| }, |
| "FIGREF2": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": ", mimicking prior email context: (i) Ignore-Blocks: chat messages from the current sender, in the past N minutes, ignoring any message block boundaries. (ii) Respect-Blocks: chat messages from the current sender, in the past N minutes, confined to the most recent message block." |
| }, |
| "FIGREF3": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Box-plot statistics: number of tokens in a context-aggregated message from Microsoft Teams and Outlook. Green diamond markers represent the mean, bold red lines are the medians, margins of the boxes are lower and upper quartiles while whiskers end-points are the minimums and maximums." |
| }, |
| "FIGREF4": { |
| "num": null, |
| "type_str": "figure", |
| "uris": null, |
| "text": "Initial blocklist trigger rates for various contextualization merging modes in Microsoft Teams chat messages." |
| }, |
| "TABREF1": { |
| "html": null, |
| "num": null, |
| "content": "<table/>", |
| "type_str": "table", |
| "text": "Microsoft Teams chat message statisticsamount of aggregated context per message." |
| }, |
| "TABREF3": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>mance metrics (Section 5.1) with a leveled suggestion rate of 0.5.</td></tr><tr><td>\u2022 TM: Total Matches is the number of sugges-</td></tr><tr><td>tions which match the upcoming text.</td></tr><tr><td>\u2022 ChM: Characters Matched is the number of</td></tr><tr><td>matched characters from all suggestions.</td></tr><tr><td>\u2022 Est. ChA: Estimated Characters Accepted</td></tr><tr><td>is the estimated 2 total number of suggested</td></tr><tr><td>characters accepted by users.</td></tr></table>", |
| "type_str": "table", |
| "text": "Microsoft Teams chat messages experiment results with various contextualization modes. First column is the experiment configuration, other columns are relative gains, over the noncontextual baseline, of the perfor-" |
| }, |
| "TABREF5": { |
| "html": null, |
| "num": null, |
| "content": "<table><tr><td>Configuration / context mode</td><td>MR</td><td colspan=\"2\">ChM / sugg Est. ChS / sugg</td><td>TM</td><td>ChM</td><td>Est. ChA</td></tr><tr><td>Subject</td><td>-1.46%\u2193</td><td>-0.21%\u2193</td><td>+1.77%\u2191</td><td>-1.58%\u2193</td><td>-0.22%\u2193</td><td>+1.80%\u2191</td></tr><tr><td>Time</td><td>+0.24%\u2191</td><td>+1.59%\u2191</td><td>+4.87%\u2191</td><td>+0.20%\u2191</td><td>+1.55%\u2191</td><td>+4.75%\u2191</td></tr><tr><td>Previous Email</td><td>-3.89%\u2193</td><td>-3.50%\u2193</td><td>-2.43%\u2193</td><td>-3.85%\u2193</td><td>-3.42%\u2193</td><td>-2.43%\u2193</td></tr><tr><td>Time+Subject</td><td>+1.70%\u2191</td><td>+2.32%\u2191</td><td>+3.32%\u2191</td><td>+1.75%\u2191</td><td>+2.34%\u2191</td><td>+3.41%\u2191</td></tr></table>", |
| "type_str": "table", |
| "text": "Microsoft Outlook email messages experiment results with various contextualization modes. First column is experiment configuration, other columns are relative gains, over the noncontextual baseline, of the performance metrics (Section 5.1) with a leveled suggestion rate of 3.8." |
| } |
| } |
| } |
| } |