| { |
| "paper_id": "2022", |
| "header": { |
| "generated_with": "S2ORC 1.0.0", |
| "date_generated": "2023-01-19T10:33:44.811297Z" |
| }, |
| "title": "semiPQA: A Study on Product Question Answering over Semi-structured Data", |
| "authors": [ |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa AI", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Gianni", |
| "middle": [], |
| "last": "Barlacchi", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa AI", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Marco", |
| "middle": [], |
| "last": "Del Tredici", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa AI", |
| "location": {} |
| }, |
| "email": "" |
| }, |
| { |
| "first": "Weiwei", |
| "middle": [], |
| "last": "Cheng", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa AI", |
| "location": {} |
| }, |
| "email": "weiweic@amazon.com" |
| }, |
| { |
| "first": "Adria", |
| "middle": [], |
| "last": "De Gispert", |
| "suffix": "", |
| "affiliation": { |
| "laboratory": "", |
| "institution": "Amazon Alexa AI", |
| "location": {} |
| }, |
| "email": "agispert@amazon.com" |
| } |
| ], |
| "year": "", |
| "venue": null, |
| "identifiers": {}, |
| "abstract": "Product question answering (PQA) aims to automatically address customer questions to improve their online shopping experience. Current research mainly focuses on finding answers from either unstructured text, like product descriptions and user reviews, or structured knowledge bases with pre-defined schemas. Apart from the above two sources, a lot of product information is represented in a semistructured way, e.g., key-value pairs, lists, tables, json and xml files, etc. These semistructured data can be a valuable answer source since they are better organized than free text, while being easier to construct than structured knowledge bases. However, little attention has been paid to them. To fill in this blank, here we study how to effectively incorporate semi-structured answer sources for PQA and focus on presenting answers in a natural, fluent sentence. To this end, we present semiPQA: a dataset to benchmark PQA over semi-structured data. It contains 11,243 written questions about json-formatted data covering 320 unique attribute types. Each data point is paired with manually-annotated text that describes its contents, so that we can train a neural answer presenter to present the data in a natural way. We provide baseline results and a deep analysis on the successes and challenges of leveraging semi-structured data for PQA. In general, state-of-the-art neural models can perform remarkably well when dealing with seen attribute types. For unseen attribute types, however, a noticeable drop is observed for both answer presentation and attribute ranking.", |
| "pdf_parse": { |
| "paper_id": "2022", |
| "_pdf_hash": "", |
| "abstract": [ |
| { |
| "text": "Product question answering (PQA) aims to automatically address customer questions to improve their online shopping experience. Current research mainly focuses on finding answers from either unstructured text, like product descriptions and user reviews, or structured knowledge bases with pre-defined schemas. Apart from the above two sources, a lot of product information is represented in a semistructured way, e.g., key-value pairs, lists, tables, json and xml files, etc. These semistructured data can be a valuable answer source since they are better organized than free text, while being easier to construct than structured knowledge bases. However, little attention has been paid to them. To fill in this blank, here we study how to effectively incorporate semi-structured answer sources for PQA and focus on presenting answers in a natural, fluent sentence. To this end, we present semiPQA: a dataset to benchmark PQA over semi-structured data. It contains 11,243 written questions about json-formatted data covering 320 unique attribute types. Each data point is paired with manually-annotated text that describes its contents, so that we can train a neural answer presenter to present the data in a natural way. We provide baseline results and a deep analysis on the successes and challenges of leveraging semi-structured data for PQA. In general, state-of-the-art neural models can perform remarkably well when dealing with seen attribute types. For unseen attribute types, however, a noticeable drop is observed for both answer presentation and attribute ranking.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Abstract", |
| "sec_num": null |
| } |
| ], |
| "body_text": [ |
| { |
| "text": "Product question answering (PQA) is playing an increasingly important role in e-commerce platforms. It is able to greatly improve the online shopping experience since customers do not need to traverse over the detailed web pages to seek information themselves. Traditional approaches built structured knowledge bases for product attributes and mapped customer questions into executable queries (Frank et al., 2007; Tapeh and Rahgozar, 2008; Hui et al., 2013; Li et al., 2019) . In recent years, with the rapid progress of large-scaled pretrained neural models, many research works have achieved promising results by leveraging only unstructured text, like product descriptions, user reviews and community answers (Cui et al., 2017; Gupta et al., 2019; Gao et al., 2019; Zhang et al., 2020) . Lying between these two source types, a lot of product information is often organized in a semistructured form, e.g., key-value pairs, lists and tables from product web pages, json and xml files from internal databases, etc. These semi-structured data can be a valuable answer source since they are better organized and more precise than free text, while being much cheaper to maintain than structured knowledge bases. Nonetheless, few research works have ever considered them and there is no public available dataset for its study. This paper aims to fill in this blank and study how to effectively incorporate semi-structured answer sources for PQA and present answers in a natural sentence. To this end, we construct a dataset to benchmark this study. It contains 11,243 product questions about json-formatted semi-structured data 1 . The data contains 320 unique attribute types (size, material, color, etc) spanning a diverse set of semi-structured forms like key-value pairs, lists and hierarchies. Each data is paired with manually annotated text that describes its contents. Table 1 shows some examples from the dataset. 
Given a question, there are two steps we need to get an answer: (1). Attribute ranking: selecting the proper attribute that contains the information to answer the question. Modern pre-trained neural models and QA datasets mainly focus on plain text, so they may not gen-eralize well to ranking semi-structured attributes, especially with limited training data. (2). Answer presentation: presenting the answer in a fluent sentence. It is not user-friendly to directly present the semi-structured data to customers, especially for applications like voice assistants. We apply datato-text generation models to convert these data into fluent text.", |
| "cite_spans": [ |
| { |
| "start": 394, |
| "end": 414, |
| "text": "(Frank et al., 2007;", |
| "ref_id": "BIBREF8" |
| }, |
| { |
| "start": 415, |
| "end": 440, |
| "text": "Tapeh and Rahgozar, 2008;", |
| "ref_id": "BIBREF25" |
| }, |
| { |
| "start": 441, |
| "end": 458, |
| "text": "Hui et al., 2013;", |
| "ref_id": "BIBREF13" |
| }, |
| { |
| "start": 459, |
| "end": 475, |
| "text": "Li et al., 2019)", |
| "ref_id": "BIBREF16" |
| }, |
| { |
| "start": 713, |
| "end": 731, |
| "text": "(Cui et al., 2017;", |
| "ref_id": "BIBREF3" |
| }, |
| { |
| "start": 732, |
| "end": 751, |
| "text": "Gupta et al., 2019;", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 752, |
| "end": 769, |
| "text": "Gao et al., 2019;", |
| "ref_id": "BIBREF9" |
| }, |
| { |
| "start": 770, |
| "end": 789, |
| "text": "Zhang et al., 2020)", |
| "ref_id": "BIBREF28" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 1875, |
| "end": 1882, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For attribute ranking, we build our model upon state-of-the-art pre-trained language models. Due to the small size of our training data, we follow the common practice of prefinetuning the attribute ranker on four large-scale QA datasets: Natural Questions (Kwiatkowski et al., 2019) , AmazonQA (McAuley and Yang, 2016) , NewsQA (Trischler et al., 2017) and Squad (Rajpurkar et al., 2016) . Since these are all based on unstructured text, we also experiment with converting semi-structured attributes into text before being passed to the ranker. Our results show that text-based QA models are quite robust to semi-structured data representations, and can rank attributes correctly with only keyword matching without the extra order information.", |
| "cite_spans": [ |
| { |
| "start": 256, |
| "end": 282, |
| "text": "(Kwiatkowski et al., 2019)", |
| "ref_id": "BIBREF14" |
| }, |
| { |
| "start": 285, |
| "end": 318, |
| "text": "AmazonQA (McAuley and Yang, 2016)", |
| "ref_id": null |
| }, |
| { |
| "start": 328, |
| "end": 352, |
| "text": "(Trischler et al., 2017)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 363, |
| "end": 387, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "For answer presentation, we consider a questionindependent answer presenter, which is less risky than question-dependent presentation while being more flexible than span extraction or multi-choice selection. We evaluate both a template-based system and a neural sequence-to-sequence generation model. Each template is one or more sentences with gaps that can be filled with pre-defined rules (Deemter et al., 2005) . However, semistructured data does not follow any unified schema, so designing rules to cover all possible data forms or unseen attributes is infeasible. Our neural generation models are initialized with Bart (Lewis et al., 2020) and T5 (Raffel et al., 2020) , two representative pretrained models for generative tasks, and fine-tuned on a small set of annotated examples. Compared with the template system, we show the neural approach improves not only the fluency, but also the faithfulness of presented answers.", |
| "cite_spans": [ |
| { |
| "start": 392, |
| "end": 414, |
| "text": "(Deemter et al., 2005)", |
| "ref_id": "BIBREF4" |
| }, |
| { |
| "start": 625, |
| "end": 645, |
| "text": "(Lewis et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 653, |
| "end": 674, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "Finally, we discuss and analyse the challenge of generating factually-correct sentences without hallucinate information, as well as the difficulty of handling unseen attributes in both ranking and answer presentation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Introduction", |
| "sec_num": "1" |
| }, |
| { |
| "text": "The data collection contains 3 stages: semistructured attribute collection, text annotation and question sourcing. This section will explain these three stages in order then present the statistics. Attribute Collection We obtain the semistructured attributes of product information from our internal database. These attributes are aggregated from different providers with varied schema. We select 320 unique attribute types from it, filter out information only for internal use and indicator tags containing no actual information like \"lan-guage_tag\", \"attribute_id\" etc. For each of the 320 attribute types, we randomly sample 20 products containing such attribute from 5M products sold in the US market (The 5M products are randomly sampled from different categories), then extract their attribute instances. After removing duplicate ones, we get 3,316 unique attribute instances in the end. We then preprocess them to lower-case all characters, remove emojis and normalize all floats to contain at most 2 decimals. Text Annotation After obtaining the semistructured attributes, we hire annotators from Amazon Mechanical Turk to write a natural sentence for each attribute instance. We restrict to US-based annotators who completed > 500 tasks, out of which more than 97% had been accepted. Before the formal annotation, we did a pilot study with 100 samples. Without extra information, we find 16% of attributes are not understandable to humans, which indicates proper context is necessary to understand the meanings of attributes. Therefore, we also provide the product image and title in the second round of pilot study. By adding the extra information, only 4% of them are not understandable. We then continue with this setting and get all attributes annotated. We also remove all attributes that are not understandable to annotators (usually those that rely on other information to interpret), and end up with 3,191 attribute instances annotated with their description text. 
Question Sourcing We collect questions on Mechanical Turk by present annotators with the image, title and rating of the product plus one of its associated attribute instances. Annotators are asked to imagine themselves as potential customers, and their task is to ask four questions about this attribute, which means that the attribute contains the information to answer their question. We explicitly add three criteria that annotators must follow: ques- ", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Dataset", |
| "sec_num": "2" |
| }, |
| { |
| "text": "Is the body made out of nylon? Data (key-value): fabric_type:{ value:\"Body: Nylon/spandex; cup linings:100% polyester;cup pad:100% polyurethane.\"} Text:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question:", |
| "sec_num": null |
| }, |
| { |
| "text": "The body is made of nylon and spandex, the linings in ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question:", |
| "sec_num": null |
| }, |
| { |
| "text": "What kind of devices fit in this? Data (list): compatible_devices: {value:\"apple ipad mini 4\"}; {value:\"apple ...", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Question:", |
| "sec_num": null |
| }, |
| { |
| "text": "The product is compatible with apple ipad mini 4, apple ipad air... Question:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text:", |
| "sec_num": null |
| }, |
| { |
| "text": "Is (2) Diverse, so the three questions must not be paraphrase each other, and (3) Answerable by the attribute, ensuring that the attribute contains the information to answer the questions. After getting these questions, we lower case them and remove duplicate questions about the same products. Dataset Split and Statistics The dataset will be used to train and evaluate the (1) attribute ranker and (2) answer presenter. For both, we have two test scenarios, one containing only seen attributes with unseen values, and the other containing only unseen attributes to test the model generalization capability. For the unseen scenario, we randomly sample 30 attribute types from all 320 types. We sample 58 instances from them and add into the dev set, while the rest are used as test set. For the seen scenario, we randomly sample 440 instances from the remaining 290 attribute types. 220 of them are added into the dev set and the rest serve as the test set. We use one fixed dev set containing both seen (220) attributes and unseen (58) attributes. All remaining instances serve as training set. Due to the small data size, we perform cross validation to get more reliable results. We repeat the above process ten times with different seeds to get 10 different splits, then train/evaluate on them and average the results. For each question asking about one attribute, we treat all other attribute instances belonging to the same product as negative candidates. The candidate positive-negative ratio is 1:17.89.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Text:", |
| "sec_num": null |
| }, |
| { |
| "text": "Attribute ranking aims to select the proper attribute that contains the information to answer the userposed question. We start from a tf-idf baseline, which has been shown a strong baseline for sentence matching tasks (Arora et al., 2017) . We count the frequency based on the attribute instances on the training set. At test time, we convert question and answer candidate into tf-idf vectors based on the counted frequency, then compute their cosine similarity as the ranking score.", |
| "cite_spans": [ |
| { |
| "start": 218, |
| "end": 238, |
| "text": "(Arora et al., 2017)", |
| "ref_id": "BIBREF0" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Ranking", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Following the common practice, we also tried concatenating the question and candidate attribute into one sequence then feeding into the Robertabase encoder (Liu et al., 2019) , a Transformerbased neural model pretrained on billions of text. The final classifier is built on top of the representation of the first [CLS] token. The multi selfattention layers of the encoder makes sure each token is able to interact with all other tokens to capture the dependency relations. The model is trained to maximize the likelihood of the positive candidates and minimize that of the negative candidates. As for the input form of the semi-structured attribute, we experimented with 5 forms: (1) nameonly: only use the attribute name as input. (2) value-only: only use the attribute value as input.", |
| "cite_spans": [ |
| { |
| "start": 156, |
| "end": 174, |
| "text": "(Liu et al., 2019)", |
| "ref_id": "BIBREF18" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Ranking", |
| "sec_num": "3" |
| }, |
| { |
| "text": "(3) linearized: use the linearized json which concatenates the attribute name and value as input. (4) template: use the template system to generate its corresponding text, then use the generated text as input. (5) neural: use the neural generator to generate its corresponding text, then use the generated text as input.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Ranking", |
| "sec_num": "3" |
| }, |
| { |
| "text": "Due to the limited size of our training data, we follow a two-step setting (Garg et al., 2020) where the Roberta-base model is first fine-tuned on a largescale QA dataset, then fine-tuned on our semiPQA training data. This has been shown to improve performance in the low-resource setting (Hazen et al., 2019; Garg et al., 2020) . We consider 4 datasets: (1) NQ: the Google Natural Questions dataset. We use its sentence selection version (Garg et al., 2020) , where its negative samples are cate-gorized into 4 classes to improve the robustness of the model. It contains 61,186 questions from the Google queries for training. (2) AmazonQA: QA pairs from the Amazon community QA website (Gupta et al., 2019) . We remove answers containing \"I don't know\", \"I'm not sure\" etc, and filter questions more than 32 words and answers more than 64 words. Negative candidates are answers about different questions under the same product. It contains 1,065,407 community questions for training. (3) NewsQA: QAs about news articles (Trischler et al., 2017) . We convert it into sentence selection and drop the span label. For each question, we sample 5 negative sentences not labeled as correct for training. The training dataset contains 75,473 questions. (4) Squad: QAs about wikipedia paragraphs (Rajpurkar et al., 2016) . We treat sentences containing the ground-truth span as positive and other sentences in the same paragraph as negative. The training dataset contains 87,599 questions. Notably, all answers in the above 4 datasets are in form of unstructured sentences..", |
| "cite_spans": [ |
| { |
| "start": 75, |
| "end": 94, |
| "text": "(Garg et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 289, |
| "end": 309, |
| "text": "(Hazen et al., 2019;", |
| "ref_id": "BIBREF12" |
| }, |
| { |
| "start": 310, |
| "end": 328, |
| "text": "Garg et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 439, |
| "end": 458, |
| "text": "(Garg et al., 2020)", |
| "ref_id": "BIBREF10" |
| }, |
| { |
| "start": 687, |
| "end": 707, |
| "text": "(Gupta et al., 2019)", |
| "ref_id": "BIBREF11" |
| }, |
| { |
| "start": 1021, |
| "end": 1045, |
| "text": "(Trischler et al., 2017)", |
| "ref_id": "BIBREF26" |
| }, |
| { |
| "start": 1288, |
| "end": 1312, |
| "text": "(Rajpurkar et al., 2016)", |
| "ref_id": "BIBREF23" |
| } |
| ], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute Ranking", |
| "sec_num": "3" |
| }, |
| { |
| "text": "We analyzed the performance under three settings: (1) zeroshot where the model is applied directly to the testsets without using our training data, (2) performance on seen attributes after finetuning on the training data and (2) performance on unseen attributes after finetuning on the training data 2 . Precision@1 results are shown in Figure 1 . We also computed other metrics like MAP, MRR and HIT@5, but they show a similar trend and are omitted for space limit. Zeroshot Performance The zeroshot results are visualized in the upper part of Figure 1 , where we apply the rankers finetuned on different datasets to directly test on our data. As can be seen, when only the attribute name or value is available, the performance is significantly lower than the others, both for neural models and the tf-idf baseline. This suggests we need information from both the attribute name and value to rank attributes properly. Neither of them are sufficient by its own. Neural models finetuned on unstructured text can generally adapt well to semi-structured data (linerized form), except for the one finetuned on NQ which performs poorly compared with others. One reason could be that the negative samples from NQ are finer-grained. It must learn to differentiate between sentences containing correct answer spans but talking about irrelevant things, and correct sentences. Therefore, it must rely on the sentence structure to infer the meaning and decide whether it is relevant or not (Garg et al., 2020) . When directly tested on semi-structured jsons, it cannot easily interpret non-natural sentences. When finetuned on other datasets like NewsQA, AmazonQA and Squad, negative samples are randomly sampled and hardly contain the correct answer span, so the model might only rely on span detection and do not need well-formed sentences. 
Using template-generated text leads to the best zeroshot performance for all models, next come the neuralgenerated text and linearized json which perform slightly worse. Among all datasets used for finetuning, AmazonQA adapts best for all input formats. This is not surprising considering that it is also about product questions and has the largest data size for finetuning. Finetuned Performance on Seen Attributes The finetuned results on seen attributes is visualized in the middle of Figure 1 . \"Roberta\" indicates the model is initialized with the Roberta-based checkpoint without being finetuned on any other QA datasets in advance. \"NQ\" indicates that the Roberta-based model is first finetuned on NQ, then finetuned on our training data, same for \"Ama-zonQA\", \"NewsQA\" and \"Squad\". Similarly to the zeroshot setting, using only the attribute name or value leads to significantly worse results, although attribute names seem to be more important when finetuning on the training data. Using the linearized json format and the template-generated text have the best overall performance, achieving a precision score of over 85%. Using more natural and fluent text does not help the ranking performance on seen attributes. Although neural generated text are of higher-quality according to human evaluations (to be shown in Section 4), this does not make the ranking task easier and leads to performance drop, suggesting that presentation is not a requirement for ranking and can be addressed separately. Prefinetuning on large-scale text-based QA datasets also does not help the performance on seen attributes, as the Roberta result already achieves similar performance. The model is able to quickly learn the correspondence between questions and seen attributes even with the limited training data.", |
| "cite_spans": [ |
| { |
| "start": 1479, |
| "end": 1498, |
| "text": "(Garg et al., 2020)", |
| "ref_id": "BIBREF10" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 337, |
| "end": 345, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 545, |
| "end": 553, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| }, |
| { |
| "start": 2320, |
| "end": 2328, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute Ranking", |
| "sec_num": "3" |
| }, |
| { |
| "text": "In the bottom of Figure 1 , we show the finetuned performance when testing on unseen attributes. As expected, a significant performance drop is observed for all models, especially when using attribute names only as this is mostly equivalent to classification over unseen labels. Using neuralgenerated text as input achieves the best performance in all settings. We hypothesise that the neural-generated texts are less rigid and more diverse than template-generated or linearised json data, which prevents the model from overfitting.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 17, |
| "end": 25, |
| "text": "Figure 1", |
| "ref_id": "FIGREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "Finetuning from Roberta directly performs the worst on average, and finetuning first on Ama-zonQA generally leads to a smaller performance drop with respect to seen attributes. The large amount of questions in AmazonQA, though not helpful for seen attributes, do improve the model robustness over unseen attributes. Analysis As shown above, directly using the linearized json format performs well in the zeroshot setting, which indicates that models finetuned on QA datasets are able to learn to generalize to the json format when finetuning on the sentence format. To investigate this surprising finding, we perform an ablation study in the following settings:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "1. Remove all quotation marks plus curly braces from the json.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "2. On top of (1), further remove all colons from the json.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "3. On top of (2), further shuffle the word order in json.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "By gradually removing the structural features of the representation, we aim to evaluate whether the model needs this json structure for attribute ranking. The zeroshot p@1 scores obtained are reported in Figure 2 . We also do the same to text generated from the template and neural models.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 212, |
| "text": "Figure 2", |
| "ref_id": "FIGREF1" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "As can be seen, removing the json structure does not have a great effect on performance. Even after shuffling the word orders completely, the performance drop is within 5% for most models. However, removing either the attribute name or value does lead to significant performance drops, which indicates that the model relies more on semantic matching against both attribute name and value for prediction, rather than on the structure or word order information.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "Finally, the bottom figure shows that in the zeroshot setting, shuffling the word order reduces the performance for both the linearized, template and neural format. The drop is more for template and neural format but less for the linearized json format. This implies the pretrained QA models are more sensitive to word orders in the sentence format than the structured json format. When finetuned on the training data, however, word orders loses importance. Interestingly, when testing on unseen attribute, shuffling the word order even improves model performance. This further confirmed that for this task, the model does not need to rely on the word order to make predictions, shuffling the word order can even improve the model robustness on generalizing to unseen attributes.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Finetuned Performance on Unseen Attributes", |
| "sec_num": null |
| }, |
| { |
| "text": "The first approach we consider for answer presentation is to use handcrafted templates. However, defining a perfect template for each attribute is challenging due to the lack of a standard schema and templates cannot scale to unseen attributes. With this concern, we also experiment with training a neural data-to-text generator trained with annotated text as the target. Template System When designing the template system, we aim to capture general rules across different attribute types so that one template can be reusable to other similar attributes. We define each template should contain (1) a precondition specializing when to apply the template, (2) one or several corresponding text with gaps to fill, and (3) a set of rules defining how to fill in the gaps. For example, the following is a template defined from the attribute type ARE_BATTERIES_REQUIRED:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Precondition: applies if the POS tag of the attribute name follows the pattern of be_NOUN_VERBed. Rule: (1) If the value is \"Y\" or \"yes\" or \"True\": output \"It VERBs the NOUN\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "(2) Otherwise: output \"It does not VERB the NOUN\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "where VERBs and VERBed mean the third person singular and past particle form of the verb. For ARE_BATTERIES_REQUIRED, VERBs would be \"requires\" and VERBed is \"required\". It can also apply to other attribute types following the same pattern like \"is_assembly_required\" and \"is_software_included\".", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "During template construction, we maintain a template bank starting from empty. As we see more attribute types, we check if any template from the bank can be applied, and if so, whether it generates the correct text or whether we need to manually update the template. Otherwise, we create a new template for this attribute type. This process is repeated until we go over all the 320 attribute types three times, to refine, merge and fix the template bank and rules. After these rounds, we end up with a total of 23 distinct templates.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Nevertheless, during the construction process, Attribute value Text { value:\"gaspowered\"} The product is gaspowered. { value:\"batteries\"} It runs on batteries. { value:\"Manual\" } This doesn't have power. { value:\"NA\" } This doesn't run on any power. we realize it is nearly impossible to devise a template system to cover all cases well, even for the limited 320 attribute types that we focus on. The difficulty lies in the following two diversities in the data: (1) linguistic diversity: The attribute values do not follow any strict rule. They can be free text as long as it conveys the meaning, which makes it hard to design general rules even for a single attribute type. (2) structural diversity: The json format is a loose structure. The same semantic meaning can be organized in different ways and hierarchies. Applying one rule for different structures can easily lead to parsing errors. Table 2 shows some examples of different values for the same attribute type. We can see that even for one single attribute, it requires many verbalizing rules to handle different structures and attribute values, let alone extending the template rules to multiple attribute types. Neural Generator To avoid pre-defined rules and to generalise to unseen attributes, we train a neural generator model initialized either with Bart (Lewis et al., 2020) or T5 (Raffel et al., 2020) , two stateof-the-art generative models pretrained on large amount of web text with self-supervised objectives. As input, we feed the linearized json-formatted data 3 and the output is the annotated text. We further normalize the numbers in both the attribute and text to keep them in a consistent form, to help the model learn their correspondence in the generation task. For example, we turn forms like \"1.\", \"1.0\" and \"1.00\" into 1, and normalize words to numeric values (\"one\" \u2192 \"1\" etc).", |
| "cite_spans": [ |
| { |
| "start": 1323, |
| "end": 1343, |
| "text": "(Lewis et al., 2020)", |
| "ref_id": "BIBREF15" |
| }, |
| { |
| "start": 1350, |
| "end": 1371, |
| "text": "(Raffel et al., 2020)", |
| "ref_id": "BIBREF22" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 896, |
| "end": 903, |
| "text": "Table 2", |
| "ref_id": "TABREF2" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "To minimize the chances of hallucination in the generation, we also delexicalize words in the an- Ferreira et al., 2019; Chang et al., 2020b,a) . The tag is the linearized path from the root node (attribute name) to the tag of the value. For example, for the second sentence in Table 1 , the text \"The product is compatible with ...\" will be delexicalized into \"The product is compatible for (concatenated) [value] .\" In the testing phase, after the model decodes the delexicalized text, the tag is then replaced to the corresponding value in the input attribute. While this can provide the model with a clear correspondence between input and output, it also adds the risk of losing the linguistic information like tense, singular/plural after delexicalization.", |
| "cite_spans": [ |
| { |
| "start": 98, |
| "end": 120, |
| "text": "Ferreira et al., 2019;", |
| "ref_id": "BIBREF7" |
| }, |
| { |
| "start": 121, |
| "end": 143, |
| "text": "Chang et al., 2020b,a)", |
| "ref_id": null |
| }, |
| { |
| "start": 407, |
| "end": 414, |
| "text": "[value]", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 278, |
| "end": 285, |
| "text": "Table 1", |
| "ref_id": "TABREF0" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Automatic Evaluation For the automatic metrics, we report the BLEU (Papineni et al., 2002) , chrF (Popovi\u0107, 2015) and PARENT-F1 (Dhingra et al., 2019) score. The results of automatic metrics are shown in Table 3 , where we try different sizes of models and list their number of model parameters (#PARAMs). Generally all the three metrics correlate well with each other. As expected, larger models tend to perform better than smaller models, with a larger difference on unseen versus seen attributes, which suggests that larger models generalize better than smaller models on unseen attributes. This could be because larger models are encoded with more language knowledge, which makes them less likely to overfit to the attributes in the training data.", |
| "cite_spans": [ |
| { |
| "start": 67, |
| "end": 90, |
| "text": "(Papineni et al., 2002)", |
| "ref_id": "BIBREF20" |
| }, |
| { |
| "start": 98, |
| "end": 113, |
| "text": "(Popovi\u0107, 2015)", |
| "ref_id": "BIBREF21" |
| }, |
| { |
| "start": 128, |
| "end": 150, |
| "text": "(Dhingra et al., 2019)", |
| "ref_id": "BIBREF5" |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 204, |
| "end": 211, |
| "text": "Table 3", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "T5-large achieves the best performance across all metrics. Therefore, we train with the delexicalized text as mentioned in Section 4 based on T5-large to see if the delexicalization can improve the performance further (T5-L (delex) in the table). All scores are evaluated on the lexicalized text output, which means that all delexicalized parts have been replaced with the input attribute values so that we can have a fair evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "Delexicalization, unfortunately, does not help with the performance. It lowers the scores over all metrics compared with directly using the original text as the target. The reason could be that T5 is pretrained with natural text itself. It has no delexicalized slots in its training corpus. Therefore, it fails to adapt well to the format of delexicalized text. Indeed, we find that T5 sometimes generates text with slot names that do not exist in the input attribute, which affects its performance. For future research, it would be interesting to see how to adapt pretrained generative models to delexicalized text, or even directly pretraining large-scale generative models on delexicalized text.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Answer Presentation", |
| "sec_num": "4" |
| }, |
| { |
| "text": "We conduct a human evaluation of the generated texts, focusing on the following three dimensions: (1) Faithfulness, whether the text is faithful to the attribute (binary). (2) Coverage, whether the text covers all contents in the attribute (binary). (3) Naturalness, whether the text is a natural sentence rather than a machine-generated rigid one. 4-ary score from 1(rigid), 2(slightly rigid), 3(slightly natural) to 4(natural). On seen attributes, we evaluate the T5-large and T5-large with delexicalized text (T5-L (delex)), plus the template system and the annotated reference.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Human Evaluation", |
| "sec_num": null |
| }, |
| { |
| "text": "From T5-large allergen_information: { value:gluten_free }; { value:dairy_free } allergen warning: the product contains gluten free,dairy free.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute From Template", |
| "sec_num": null |
| }, |
| { |
| "text": "this product is gluten free and dairy free.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute From Template", |
| "sec_num": null |
| }, |
| { |
| "text": "team_name: { value:\"null\" } the team name of the product is null. this does not have a team name. speaker_type: {value:\"portable bluetooth speakers\"} the product has a portable bluetooth speakers speaker. this is a portable bluetooth speaker. installation_type: value:\"driver side\" the product is installed using the driver side. this is installed on the driver side. Table 4 : Example of template-generated texts that are labeled as unfaithful.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 368, |
| "end": 375, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute From Template", |
| "sec_num": null |
| }, |
| { |
| "text": "Reference From T5-large size_per_pearl: { value:\"iphone\" } it is an iphone. the product has an iphone size pearl. switch_type: { value:\"rotary switch\" } this has a switch that turns. the product has a rotary switch. target_species: { value:\"Dog\" } for dogs. this is for dogs. installed_size: [{unit: un-known_modifier, value:32.}] its cache memory installed_size: [{unit: unknown_modifier, value:32 .}] the product has a cache memory of 32 units. Table 5 : Example references which are labeled as unfaithful(first two rows) or unnatural (last two rows).", |
| "cite_spans": [ |
| { |
| "start": 292, |
| "end": 330, |
| "text": "[{unit: un-known_modifier, value:32.}]", |
| "ref_id": null |
| }, |
| { |
| "start": 364, |
| "end": 398, |
| "text": "[{unit: unknown_modifier, value:32", |
| "ref_id": null |
| } |
| ], |
| "ref_spans": [ |
| { |
| "start": 447, |
| "end": 454, |
| "text": "Table 5", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "On unseen attributes, we only evaluate T5-large and the reference since handcrafted templates cannot be applied to unseen attributes at all. From each of 10 data splits, we randomly sample 50 attributes from it such that each model has 500 attribute-text pairs being evaluated. Each pair is evaluated by three annotators. The final scores are averaged over the 500 pairs for each model. We show the results and the agreement score among annotators in Table 3 and Table 6 respectively.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 451, |
| "end": 470, |
| "text": "Table 3 and Table 6", |
| "ref_id": "TABREF4" |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "Faithful Coverage Natur-4class Natur-2class 0.97762 0.97402 0.80499 0.92569 Table 6 : Agreement Score for Answer Presentation.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 76, |
| "end": 83, |
| "text": "Table 6", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "Overall, the evaluation has a rather high agreement score. Naturalness has the lowest agreement since it is 4-ary. We also calculate the binary score for naturalness by combining natural and slightly natural into one bucket, and combining rigid and slightly rigid into the other bucket. The agreement score grows to over 0.92 by this means. We then manually checked and corrected all attribute-text pairs that do not have an agreement score of 1 for faithfulness and coverage. For naturalness, as it is a rather subjective metric anyway, we do not correct it. We also manually verified the faithfulness and coverage for the attribute nutritional_info, which we find especially hard to be evaluated correctly due to its complexity.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "Overall all models have high scores on both faithfulness and coverage, and differences are small. For naturalness, as expected, templates have the lowest score. Using delexicalization underperforms the standard neural model, which is consistent with the findings from the automatic metric scores. We observe two interesting phenomena:", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "(1) Neural models outperform templates even for faithfulness and (2) Neural models outperform human references for faithfulness and naturalness. In Table 4 and 5, we list examples of text generated from templates/references that are labeled as unfaithful/unnatural to the attribute. As can be seen, errors in template-generated texts usually occur because the templates designed for certain values do not apply to a new value. Errors in human references are due to annotation noise, which is usually inevitable. The T5 model outperforms the reference, suggesting that it is able to round up these few annotation errors and learn the general pattern from the most correct references.", |
| "cite_spans": [], |
| "ref_spans": [ |
| { |
| "start": 148, |
| "end": 155, |
| "text": "Table 4", |
| "ref_id": null |
| } |
| ], |
| "eq_spans": [], |
| "section": "Attribute", |
| "sec_num": null |
| }, |
| { |
| "text": "In this work, we study how to effectively leverage semi-structured data for product question answering. As there are no public datasets for this problem, we collect a dataset containing manually annotated questions together with description text about semi-structured attributes from our internal database. We present empirical results and findings about two key challenges of this problem: attribute ranking and answer presentation. Experiments show that neural models can provide better text than template systems and perform well for ranking seen attributes, although there is still a noticeable drop when it comes to unseen attributes for both ranking and generation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "Conclusion", |
| "sec_num": "5" |
| }, |
| { |
| "text": "As json is a standard format for storing data with arbitrary types/schemata, other representations (such as tables or xml files) can be easily mapped to it.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "In the zeroshot setting, we only evaluate on the seen attribute split since there is no concept of \"seen\" or \"unseen\" for zeroshot evaluation.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| }, |
| { |
| "text": "We also tried other input formats like flattening the hierarchical structure, adding instruction prompts(Schick and Sch\u00fctze, 2020; Liu et al., 2021) etc, but did not find significant improvements.", |
| "cite_spans": [], |
| "ref_spans": [], |
| "eq_spans": [], |
| "section": "", |
| "sec_num": null |
| } |
| ], |
| "back_matter": [], |
| "bib_entries": { |
| "BIBREF0": { |
| "ref_id": "b0", |
| "title": "A simple but tough-to-beat baseline for sentence embeddings", |
| "authors": [ |
| { |
| "first": "Sanjeev", |
| "middle": [], |
| "last": "Arora", |
| "suffix": "" |
| }, |
| { |
| "first": "Yingyu", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| }, |
| { |
| "first": "Tengyu", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "5th International Conference on Learning Representations", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Sanjeev Arora, Yingyu Liang, and Tengyu Ma. 2017. A simple but tough-to-beat baseline for sentence embed- dings. In 5th International Conference on Learning Representations, ICLR 2017.", |
| "links": null |
| }, |
| "BIBREF1": { |
| "ref_id": "b1", |
| "title": "Unsupervised pidgin text generation by pivoting english data and self-training", |
| "authors": [ |
| { |
| "first": "Ernie", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [ |
| "Ifeoluwa" |
| ], |
| "last": "Adelani", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2003.08272" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ernie Chang, David Ifeoluwa Adelani, Xiaoyu Shen, and Vera Demberg. 2020a. Unsupervised pidgin text generation by pivoting english data and self-training. arXiv preprint arXiv:2003.08272.", |
| "links": null |
| }, |
| "BIBREF2": { |
| "ref_id": "b2", |
| "title": "Dart: A lightweight quality-suggestive data-to-text annotation tool", |
| "authors": [ |
| { |
| "first": "Ernie", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Jeriah", |
| "middle": [], |
| "last": "Caplinger", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Marin", |
| "suffix": "" |
| }, |
| { |
| "first": "Xiaoyu", |
| "middle": [], |
| "last": "Shen", |
| "suffix": "" |
| }, |
| { |
| "first": "Vera", |
| "middle": [], |
| "last": "Demberg", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 28th International Conference on Computational Linguistics: System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "12--17", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ernie Chang, Jeriah Caplinger, Alex Marin, Xiaoyu Shen, and Vera Demberg. 2020b. Dart: A lightweight quality-suggestive data-to-text annotation tool. In Proceedings of the 28th International Conference on Computational Linguistics: System Demonstrations, pages 12-17.", |
| "links": null |
| }, |
| "BIBREF3": { |
| "ref_id": "b3", |
| "title": "Superagent: A customer service chatbot for e-commerce websites", |
| "authors": [ |
| { |
| "first": "Lei", |
| "middle": [], |
| "last": "Cui", |
| "suffix": "" |
| }, |
| { |
| "first": "Shaohan", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Furu", |
| "middle": [], |
| "last": "Wei", |
| "suffix": "" |
| }, |
| { |
| "first": "Chuanqi", |
| "middle": [], |
| "last": "Tan", |
| "suffix": "" |
| }, |
| { |
| "first": "Chaoqun", |
| "middle": [], |
| "last": "Duan", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of ACL 2017, System Demonstrations", |
| "volume": "", |
| "issue": "", |
| "pages": "97--102", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Lei Cui, Shaohan Huang, Furu Wei, Chuanqi Tan, Chao- qun Duan, and Ming Zhou. 2017. Superagent: A customer service chatbot for e-commerce websites. In Proceedings of ACL 2017, System Demonstrations, pages 97-102.", |
| "links": null |
| }, |
| "BIBREF4": { |
| "ref_id": "b4", |
| "title": "Real versus template-based natural language generation: A false opposition? Computational linguistics", |
| "authors": [ |
| { |
| "first": "Mari\u00ebt", |
| "middle": [], |
| "last": "Kees Van Deemter", |
| "suffix": "" |
| }, |
| { |
| "first": "Emiel", |
| "middle": [], |
| "last": "Theune", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Krahmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2005, |
| "venue": "", |
| "volume": "31", |
| "issue": "", |
| "pages": "15--24", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kees van Deemter, Mari\u00ebt Theune, and Emiel Krahmer. 2005. Real versus template-based natural language generation: A false opposition? Computational lin- guistics, 31(1):15-24.", |
| "links": null |
| }, |
| "BIBREF5": { |
| "ref_id": "b5", |
| "title": "Handling divergent reference texts when evaluating table-to-text generation", |
| "authors": [ |
| { |
| "first": "Bhuwan", |
| "middle": [], |
| "last": "Dhingra", |
| "suffix": "" |
| }, |
| { |
| "first": "Manaal", |
| "middle": [], |
| "last": "Faruqui", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming-Wei", |
| "middle": [], |
| "last": "Chang", |
| "suffix": "" |
| }, |
| { |
| "first": "Dipanjan", |
| "middle": [], |
| "last": "Das", |
| "suffix": "" |
| }, |
| { |
| "first": "William", |
| "middle": [], |
| "last": "Cohen", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 57th", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Bhuwan Dhingra, Manaal Faruqui, Ankur Parikh, Ming- Wei Chang, Dipanjan Das, and William Cohen. 2019. Handling divergent reference texts when evaluating table-to-text generation. In Proceedings of the 57th", |
| "links": null |
| }, |
| "BIBREF6": { |
| "ref_id": "b6", |
| "title": "Annual Meeting of the Association for Computational Linguistics", |
| "authors": [], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "4884--4895", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Annual Meeting of the Association for Computational Linguistics, pages 4884-4895.", |
| "links": null |
| }, |
| "BIBREF7": { |
| "ref_id": "b7", |
| "title": "Neural datato-text generation: A comparison between pipeline and end-to-end architectures", |
| "authors": [ |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Thiago Castro Ferreira", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Van Der Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Emiel", |
| "middle": [], |
| "last": "Emiel Van Miltenburg", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Krahmer", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the 2019 Conference on Empirical Methods in Natural Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "552--562", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Thiago Castro Ferreira, Chris van der Lee, Emiel van Miltenburg, and Emiel Krahmer. 2019. Neural data- to-text generation: A comparison between pipeline and end-to-end architectures. In Proceedings of the 2019 Conference on Empirical Methods in Natu- ral Language Processing and the 9th International Joint Conference on Natural Language Processing (EMNLP-IJCNLP), pages 552-562.", |
| "links": null |
| }, |
| "BIBREF8": { |
| "ref_id": "b8", |
| "title": "Question answering from structured knowledge sources", |
| "authors": [ |
| { |
| "first": "Anette", |
| "middle": [], |
| "last": "Frank", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans-Ulrich", |
| "middle": [], |
| "last": "Krieger", |
| "suffix": "" |
| }, |
| { |
| "first": "Feiyu", |
| "middle": [], |
| "last": "Xu", |
| "suffix": "" |
| }, |
| { |
| "first": "Hans", |
| "middle": [], |
| "last": "Uszkoreit", |
| "suffix": "" |
| }, |
| { |
| "first": "Berthold", |
| "middle": [], |
| "last": "Crysmann", |
| "suffix": "" |
| }, |
| { |
| "first": "Brigitte", |
| "middle": [], |
| "last": "J\u00f6rg", |
| "suffix": "" |
| }, |
| { |
| "first": "Ulrich", |
| "middle": [], |
| "last": "Sch\u00e4fer", |
| "suffix": "" |
| } |
| ], |
| "year": 2007, |
| "venue": "Journal of Applied Logic", |
| "volume": "5", |
| "issue": "1", |
| "pages": "20--48", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Anette Frank, Hans-Ulrich Krieger, Feiyu Xu, Hans Uszkoreit, Berthold Crysmann, Brigitte J\u00f6rg, and Ul- rich Sch\u00e4fer. 2007. Question answering from struc- tured knowledge sources. Journal of Applied Logic, 5(1):20-48.", |
| "links": null |
| }, |
| "BIBREF9": { |
| "ref_id": "b9", |
| "title": "Product-aware answer generation in e-commerce question-answering", |
| "authors": [ |
| { |
| "first": "Shen", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhaochun", |
| "middle": [], |
| "last": "Ren", |
| "suffix": "" |
| }, |
| { |
| "first": "Yihong", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dongyan", |
| "middle": [], |
| "last": "Zhao", |
| "suffix": "" |
| }, |
| { |
| "first": "Dawei", |
| "middle": [], |
| "last": "Yin", |
| "suffix": "" |
| }, |
| { |
| "first": "Rui", |
| "middle": [], |
| "last": "Yan", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining", |
| "volume": "", |
| "issue": "", |
| "pages": "429--437", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Shen Gao, Zhaochun Ren, Yihong Zhao, Dongyan Zhao, Dawei Yin, and Rui Yan. 2019. Product-aware an- swer generation in e-commerce question-answering. In Proceedings of the Twelfth ACM International Conference on Web Search and Data Mining, pages 429-437.", |
| "links": null |
| }, |
| "BIBREF10": { |
| "ref_id": "b10", |
| "title": "Tanda: Transfer and adapt pre-trained transformer models for answer sentence selection", |
| "authors": [ |
| { |
| "first": "Siddhant", |
| "middle": [], |
| "last": "Garg", |
| "suffix": "" |
| }, |
| { |
| "first": "Thuy", |
| "middle": [], |
| "last": "Vu", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Moschitti", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the AAAI Conference on Artificial Intelligence", |
| "volume": "34", |
| "issue": "", |
| "pages": "7780--7788", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Siddhant Garg, Thuy Vu, and Alessandro Moschitti. 2020. Tanda: Transfer and adapt pre-trained trans- former models for answer sentence selection. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 34, pages 7780-7788.", |
| "links": null |
| }, |
| "BIBREF11": { |
| "ref_id": "b11", |
| "title": "Amazonqa: A review-based question answering task", |
| "authors": [ |
| { |
| "first": "Mansi", |
| "middle": [], |
| "last": "Gupta", |
| "suffix": "" |
| }, |
| { |
| "first": "Nitish", |
| "middle": [], |
| "last": "Kulkarni", |
| "suffix": "" |
| }, |
| { |
| "first": "Raghuveer", |
| "middle": [], |
| "last": "Chanda", |
| "suffix": "" |
| }, |
| { |
| "first": "Anirudha", |
| "middle": [], |
| "last": "Rayasam", |
| "suffix": "" |
| }, |
| { |
| "first": "Zachary", |
| "middle": [ |
| "C" |
| ], |
| "last": "Lipton", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1908.04364" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mansi Gupta, Nitish Kulkarni, Raghuveer Chanda, Anirudha Rayasam, and Zachary C Lipton. 2019. Amazonqa: A review-based question answering task. arXiv preprint arXiv:1908.04364.", |
| "links": null |
| }, |
| "BIBREF12": { |
| "ref_id": "b12", |
| "title": "Towards domain adaptation from limited data for question answering using deep neural networks", |
| "authors": [ |
| { |
| "first": "J", |
| "middle": [], |
| "last": "Timothy", |
| "suffix": "" |
| }, |
| { |
| "first": "Shehzaad", |
| "middle": [], |
| "last": "Hazen", |
| "suffix": "" |
| }, |
| { |
| "first": "Daniel", |
| "middle": [], |
| "last": "Dhuliawala", |
| "suffix": "" |
| }, |
| { |
| "first": "", |
| "middle": [], |
| "last": "Boies", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1911.02655" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timothy J Hazen, Shehzaad Dhuliawala, and Daniel Boies. 2019. Towards domain adaptation from lim- ited data for question answering using deep neural networks. arXiv preprint arXiv:1911.02655.", |
| "links": null |
| }, |
| "BIBREF13": { |
| "ref_id": "b13", |
| "title": "Sponsored search ad selection by keyword structure analysis", |
| "authors": [ |
| { |
| "first": "Kai", |
| "middle": [], |
| "last": "Hui", |
| "suffix": "" |
| }, |
| { |
| "first": "Bin", |
| "middle": [], |
| "last": "Gao", |
| "suffix": "" |
| }, |
| { |
| "first": "Ben", |
| "middle": [], |
| "last": "He", |
| "suffix": "" |
| }, |
| { |
| "first": "Tie-Jian", |
| "middle": [], |
| "last": "Luo", |
| "suffix": "" |
| } |
| ], |
| "year": 2013, |
| "venue": "European Conference on Information Retrieval", |
| "volume": "", |
| "issue": "", |
| "pages": "230--241", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kai Hui, Bin Gao, Ben He, and Tie-jian Luo. 2013. Sponsored search ad selection by keyword structure analysis. In European Conference on Information Retrieval, pages 230-241. Springer.", |
| "links": null |
| }, |
| "BIBREF14": { |
| "ref_id": "b14", |
| "title": "Natural questions: a benchmark for question answering research", |
| "authors": [ |
| { |
| "first": "Tom", |
| "middle": [], |
| "last": "Kwiatkowski", |
| "suffix": "" |
| }, |
| { |
| "first": "Jennimaria", |
| "middle": [], |
| "last": "Palomaki", |
| "suffix": "" |
| }, |
| { |
| "first": "Olivia", |
| "middle": [], |
| "last": "Redfield", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Collins", |
| "suffix": "" |
| }, |
| { |
| "first": "Ankur", |
| "middle": [], |
| "last": "Parikh", |
| "suffix": "" |
| }, |
| { |
| "first": "Chris", |
| "middle": [], |
| "last": "Alberti", |
| "suffix": "" |
| }, |
| { |
| "first": "Danielle", |
| "middle": [], |
| "last": "Epstein", |
| "suffix": "" |
| }, |
| { |
| "first": "Illia", |
| "middle": [], |
| "last": "Polosukhin", |
| "suffix": "" |
| }, |
| { |
| "first": "Jacob", |
| "middle": [], |
| "last": "Devlin", |
| "suffix": "" |
| }, |
| { |
| "first": "Kenton", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "Transactions of the Association for Computational Linguistics", |
| "volume": "7", |
| "issue": "", |
| "pages": "453--466", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tom Kwiatkowski, Jennimaria Palomaki, Olivia Red- field, Michael Collins, Ankur Parikh, Chris Alberti, Danielle Epstein, Illia Polosukhin, Jacob Devlin, Ken- ton Lee, et al. 2019. Natural questions: a benchmark for question answering research. Transactions of the Association for Computational Linguistics, 7:453- 466.", |
| "links": null |
| }, |
| "BIBREF15": { |
| "ref_id": "b15", |
| "title": "Bart: Denoising sequence-to-sequence pre-training for natural language generation, translation, and comprehension", |
| "authors": [ |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Marjan", |
| "middle": [], |
| "last": "Ghazvininejad", |
| "suffix": "" |
| }, |
| { |
| "first": "Abdelrahman", |
| "middle": [], |
| "last": "Mohamed", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "7871--7880", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Mike Lewis, Yinhan Liu, Naman Goyal, Marjan Ghazvininejad, Abdelrahman Mohamed, Omer Levy, Veselin Stoyanov, and Luke Zettlemoyer. 2020. Bart: Denoising sequence-to-sequence pre-training for nat- ural language generation, translation, and comprehen- sion. In Proceedings of the 58th Annual Meeting of the Association for Computational Linguistics, pages 7871-7880.", |
| "links": null |
| }, |
| "BIBREF16": { |
| "ref_id": "b16", |
| "title": "Alime kbqa: Question answering over structured knowledge for e-commerce customer service", |
| "authors": [ |
| { |
| "first": "Feng-Lin", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Weijia", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Qi", |
| "middle": [], |
| "last": "Huang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yikun", |
| "middle": [], |
| "last": "Guo", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "China Conference on Knowledge Graph and Semantic Computing", |
| "volume": "", |
| "issue": "", |
| "pages": "136--148", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Feng-Lin Li, Weijia Chen, Qi Huang, and Yikun Guo. 2019. Alime kbqa: Question answering over struc- tured knowledge for e-commerce customer service. In China Conference on Knowledge Graph and Se- mantic Computing, pages 136-148. Springer.", |
| "links": null |
| }, |
| "BIBREF17": { |
| "ref_id": "b17", |
| "title": "Zhilin Yang, and Jie Tang. 2021. Gpt understands, too", |
| "authors": [ |
| { |
| "first": "Xiao", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanan", |
| "middle": [], |
| "last": "Zheng", |
| "suffix": "" |
| }, |
| { |
| "first": "Zhengxiao", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Ming", |
| "middle": [], |
| "last": "Ding", |
| "suffix": "" |
| }, |
| { |
| "first": "Yujie", |
| "middle": [], |
| "last": "Qian", |
| "suffix": "" |
| } |
| ], |
| "year": null, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2103.10385" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Xiao Liu, Yanan Zheng, Zhengxiao Du, Ming Ding, Yujie Qian, Zhilin Yang, and Jie Tang. 2021. Gpt understands, too. arXiv preprint arXiv:2103.10385.", |
| "links": null |
| }, |
| "BIBREF18": { |
| "ref_id": "b18", |
| "title": "Roberta: A robustly optimized bert pretraining approach", |
| "authors": [ |
| { |
| "first": "Yinhan", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| }, |
| { |
| "first": "Myle", |
| "middle": [], |
| "last": "Ott", |
| "suffix": "" |
| }, |
| { |
| "first": "Naman", |
| "middle": [], |
| "last": "Goyal", |
| "suffix": "" |
| }, |
| { |
| "first": "Jingfei", |
| "middle": [], |
| "last": "Du", |
| "suffix": "" |
| }, |
| { |
| "first": "Mandar", |
| "middle": [], |
| "last": "Joshi", |
| "suffix": "" |
| }, |
| { |
| "first": "Danqi", |
| "middle": [], |
| "last": "Chen", |
| "suffix": "" |
| }, |
| { |
| "first": "Omer", |
| "middle": [], |
| "last": "Levy", |
| "suffix": "" |
| }, |
| { |
| "first": "Mike", |
| "middle": [], |
| "last": "Lewis", |
| "suffix": "" |
| }, |
| { |
| "first": "Luke", |
| "middle": [], |
| "last": "Zettlemoyer", |
| "suffix": "" |
| }, |
| { |
| "first": "Veselin", |
| "middle": [], |
| "last": "Stoyanov", |
| "suffix": "" |
| } |
| ], |
| "year": 2019, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:1907.11692" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Yinhan Liu, Myle Ott, Naman Goyal, Jingfei Du, Man- dar Joshi, Danqi Chen, Omer Levy, Mike Lewis, Luke Zettlemoyer, and Veselin Stoyanov. 2019. Roberta: A robustly optimized bert pretraining ap- proach. arXiv preprint arXiv:1907.11692.", |
| "links": null |
| }, |
| "BIBREF19": { |
| "ref_id": "b19", |
| "title": "Addressing complex and subjective product-related queries with customer reviews", |
| "authors": [ |
| { |
| "first": "Julian", |
| "middle": [], |
| "last": "Mcauley", |
| "suffix": "" |
| }, |
| { |
| "first": "Alex", |
| "middle": [], |
| "last": "Yang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 25th International Conference on World Wide Web", |
| "volume": "", |
| "issue": "", |
| "pages": "625--635", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Julian McAuley and Alex Yang. 2016. Addressing complex and subjective product-related queries with customer reviews. In Proceedings of the 25th In- ternational Conference on World Wide Web, pages 625-635.", |
| "links": null |
| }, |
| "BIBREF20": { |
| "ref_id": "b20", |
| "title": "Bleu: a method for automatic evaluation of machine translation", |
| "authors": [ |
| { |
| "first": "Kishore", |
| "middle": [], |
| "last": "Papineni", |
| "suffix": "" |
| }, |
| { |
| "first": "Salim", |
| "middle": [], |
| "last": "Roukos", |
| "suffix": "" |
| }, |
| { |
| "first": "Todd", |
| "middle": [], |
| "last": "Ward", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei-Jing", |
| "middle": [], |
| "last": "Zhu", |
| "suffix": "" |
| } |
| ], |
| "year": 2002, |
| "venue": "Proceedings of the 40th annual meeting of the Association for Computational Linguistics", |
| "volume": "", |
| "issue": "", |
| "pages": "311--318", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Kishore Papineni, Salim Roukos, Todd Ward, and Wei- Jing Zhu. 2002. Bleu: a method for automatic evalu- ation of machine translation. In Proceedings of the 40th annual meeting of the Association for Computa- tional Linguistics, pages 311-318.", |
| "links": null |
| }, |
| "BIBREF21": { |
| "ref_id": "b21", |
| "title": "chrf: character n-gram f-score for automatic mt evaluation", |
| "authors": [ |
| { |
| "first": "Maja", |
| "middle": [], |
| "last": "Popovi\u0107", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the Tenth Workshop on Statistical Machine Translation", |
| "volume": "", |
| "issue": "", |
| "pages": "392--395", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Maja Popovi\u0107. 2015. chrf: character n-gram f-score for automatic mt evaluation. In Proceedings of the Tenth Workshop on Statistical Machine Translation, pages 392-395.", |
| "links": null |
| }, |
| "BIBREF22": { |
| "ref_id": "b22", |
| "title": "Exploring the limits of transfer learning with a unified text-to-text transformer", |
| "authors": [ |
| { |
| "first": "Colin", |
| "middle": [], |
| "last": "Raffel", |
| "suffix": "" |
| }, |
| { |
| "first": "Noam", |
| "middle": [], |
| "last": "Shazeer", |
| "suffix": "" |
| }, |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Roberts", |
| "suffix": "" |
| }, |
| { |
| "first": "Katherine", |
| "middle": [], |
| "last": "Lee", |
| "suffix": "" |
| }, |
| { |
| "first": "Sharan", |
| "middle": [], |
| "last": "Narang", |
| "suffix": "" |
| }, |
| { |
| "first": "Michael", |
| "middle": [], |
| "last": "Matena", |
| "suffix": "" |
| }, |
| { |
| "first": "Yanqi", |
| "middle": [], |
| "last": "Zhou", |
| "suffix": "" |
| }, |
| { |
| "first": "Wei", |
| "middle": [], |
| "last": "Li", |
| "suffix": "" |
| }, |
| { |
| "first": "Peter J", |
| "middle": [], |
| "last": "Liu", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Journal of Machine Learning Research", |
| "volume": "21", |
| "issue": "", |
| "pages": "1--67", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J Liu. 2020. Exploring the limits of transfer learning with a unified text-to-text trans- former. Journal of Machine Learning Research, 21:1- 67.", |
| "links": null |
| }, |
| "BIBREF23": { |
| "ref_id": "b23", |
| "title": "Squad: 100,000+ questions for machine comprehension of text", |
| "authors": [ |
| { |
| "first": "Pranav", |
| "middle": [], |
| "last": "Rajpurkar", |
| "suffix": "" |
| }, |
| { |
| "first": "Jian", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Konstantin", |
| "middle": [], |
| "last": "Lopyrev", |
| "suffix": "" |
| }, |
| { |
| "first": "Percy", |
| "middle": [], |
| "last": "Liang", |
| "suffix": "" |
| } |
| ], |
| "year": 2016, |
| "venue": "Proceedings of the 2016 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "2383--2392", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Pranav Rajpurkar, Jian Zhang, Konstantin Lopyrev, and Percy Liang. 2016. Squad: 100,000+ questions for machine comprehension of text. In Proceedings of the 2016 Conference on Empirical Methods in Natu- ral Language Processing, pages 2383-2392.", |
| "links": null |
| }, |
| "BIBREF24": { |
| "ref_id": "b24", |
| "title": "Few-shot text generation with pattern-exploiting training", |
| "authors": [ |
| { |
| "first": "Timo", |
| "middle": [], |
| "last": "Schick", |
| "suffix": "" |
| }, |
| { |
| "first": "Hinrich", |
| "middle": [], |
| "last": "Sch\u00fctze", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "", |
| "volume": "", |
| "issue": "", |
| "pages": "", |
| "other_ids": { |
| "arXiv": [ |
| "arXiv:2012.11926" |
| ] |
| }, |
| "num": null, |
| "urls": [], |
| "raw_text": "Timo Schick and Hinrich Sch\u00fctze. 2020. Few-shot text generation with pattern-exploiting training. arXiv preprint arXiv:2012.11926.", |
| "links": null |
| }, |
| "BIBREF25": { |
| "ref_id": "b25", |
| "title": "A knowledge-based question answering system for b2c ecommerce. Knowledge-Based Systems", |
| "authors": [ |
| { |
| "first": "Ali", |
| "middle": [], |
| "last": "Ghobadi Tapeh", |
| "suffix": "" |
| }, |
| { |
| "first": "Maseud", |
| "middle": [], |
| "last": "Rahgozar", |
| "suffix": "" |
| } |
| ], |
| "year": 2008, |
| "venue": "", |
| "volume": "21", |
| "issue": "", |
| "pages": "946--950", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Ali Ghobadi Tapeh and Maseud Rahgozar. 2008. A knowledge-based question answering system for b2c ecommerce. Knowledge-Based Systems, 21(8):946- 950.", |
| "links": null |
| }, |
| "BIBREF26": { |
| "ref_id": "b26", |
| "title": "Newsqa: A machine comprehension dataset", |
| "authors": [ |
| { |
| "first": "Adam", |
| "middle": [], |
| "last": "Trischler", |
| "suffix": "" |
| }, |
| { |
| "first": "Tong", |
| "middle": [], |
| "last": "Wang", |
| "suffix": "" |
| }, |
| { |
| "first": "Xingdi", |
| "middle": [], |
| "last": "Yuan", |
| "suffix": "" |
| }, |
| { |
| "first": "Justin", |
| "middle": [], |
| "last": "Harris", |
| "suffix": "" |
| }, |
| { |
| "first": "Alessandro", |
| "middle": [], |
| "last": "Sordoni", |
| "suffix": "" |
| }, |
| { |
| "first": "Philip", |
| "middle": [], |
| "last": "Bachman", |
| "suffix": "" |
| }, |
| { |
| "first": "Kaheer", |
| "middle": [], |
| "last": "Suleman", |
| "suffix": "" |
| } |
| ], |
| "year": 2017, |
| "venue": "Proceedings of the 2nd Workshop on Representation Learning for NLP", |
| "volume": "", |
| "issue": "", |
| "pages": "191--200", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Adam Trischler, Tong Wang, Xingdi Yuan, Justin Harris, Alessandro Sordoni, Philip Bachman, and Kaheer Suleman. 2017. Newsqa: A machine comprehension dataset. In Proceedings of the 2nd Workshop on Representation Learning for NLP, pages 191-200.", |
| "links": null |
| }, |
| "BIBREF27": { |
| "ref_id": "b27", |
| "title": "Semantically conditioned lstm-based natural language generation for spoken dialogue systems", |
| "authors": [ |
| { |
| "first": "Tsung-Hsien", |
| "middle": [], |
| "last": "Wen", |
| "suffix": "" |
| }, |
| { |
| "first": "Milica", |
| "middle": [], |
| "last": "Gasic", |
| "suffix": "" |
| }, |
| { |
| "first": "Nikola", |
| "middle": [], |
| "last": "Mrk\u0161i\u0107", |
| "suffix": "" |
| }, |
| { |
| "first": "Pei-Hao", |
| "middle": [], |
| "last": "Su", |
| "suffix": "" |
| }, |
| { |
| "first": "David", |
| "middle": [], |
| "last": "Vandyke", |
| "suffix": "" |
| }, |
| { |
| "first": "Steve", |
| "middle": [], |
| "last": "Young", |
| "suffix": "" |
| } |
| ], |
| "year": 2015, |
| "venue": "Proceedings of the 2015 Conference on Empirical Methods in Natural Language Processing", |
| "volume": "", |
| "issue": "", |
| "pages": "1711--1721", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Tsung-Hsien Wen, Milica Gasic, Nikola Mrk\u0161i\u0107, Pei- Hao Su, David Vandyke, and Steve Young. 2015. Se- mantically conditioned lstm-based natural language generation for spoken dialogue systems. In Proceed- ings of the 2015 Conference on Empirical Methods in Natural Language Processing, pages 1711-1721.", |
| "links": null |
| }, |
| "BIBREF28": { |
| "ref_id": "b28", |
| "title": "Answerfact: Fact checking in product question answering", |
| "authors": [ |
| { |
| "first": "Wenxuan", |
| "middle": [], |
| "last": "Zhang", |
| "suffix": "" |
| }, |
| { |
| "first": "Yang", |
| "middle": [], |
| "last": "Deng", |
| "suffix": "" |
| }, |
| { |
| "first": "Jing", |
| "middle": [], |
| "last": "Ma", |
| "suffix": "" |
| }, |
| { |
| "first": "Wai", |
| "middle": [], |
| "last": "Lam", |
| "suffix": "" |
| } |
| ], |
| "year": 2020, |
| "venue": "Proceedings of the 2020 Conference on Empirical Methods in Natural Language Processing (EMNLP)", |
| "volume": "", |
| "issue": "", |
| "pages": "2407--2417", |
| "other_ids": {}, |
| "num": null, |
| "urls": [], |
| "raw_text": "Wenxuan Zhang, Yang Deng, Jing Ma, and Wai Lam. 2020. Answerfact: Fact checking in product question answering. In Proceedings of the 2020 Conference on Empirical Methods in Natural Language Process- ing (EMNLP), pages 2407-2417.", |
| "links": null |
| } |
| }, |
| "ref_entries": { |
| "FIGREF0": { |
| "text": ": p@1 in zeroshot/finetuned settings.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "FIGREF1": { |
| "text": ": p@1 with varying input formats.", |
| "uris": null, |
| "type_str": "figure", |
| "num": null |
| }, |
| "TABREF0": { |
| "type_str": "table", |
| "num": null, |
| "text": "Examples of question-data-text triples in the dataset. The data features diverse forms of semi-structures like key-value pairs, lists and hierarchies.", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF2": { |
| "type_str": "table", |
| "num": null, |
| "text": "Different instances of the attribute type \"power_source_type\" and human annotated text.", |
| "content": "<table/>", |
| "html": null |
| }, |
| "TABREF4": { |
| "type_str": "table", |
| "num": null, |
| "text": "", |
| "content": "<table><tr><td>: Automatic Metric and human evaluation Results for Answer Presentation</td></tr><tr><td>notated text that match with the attribute values,</td></tr><tr><td>replacing them by a tag in the input attribute, a com-</td></tr><tr><td>mon technique used in data-to-text generation (Wen</td></tr></table>", |
| "html": null |
| } |
| } |
| } |
| } |