[{"venue": "ACL", "title": "FIESTA: Fast IdEntification of State-of-The-Art models using adaptive bandit algorithms", "abstract": "We present FIESTA, a model selection approach that significantly reduces the computational resources required to reliably identify state-of-the-art performance from large collections of candidate models. Despite being known to produce unreliable comparisons, it is still common practice to compare model evaluations based on single choices of random seeds. We show that reliable model selection also requires evaluations based on multiple train-test splits (contrary to common practice in many shared tasks). Using bandit theory from the statistics literature, we are able to adaptively determine appropriate numbers of data splits and random seeds used to evaluate each model, focusing computational resources on the evaluation of promising models whilst avoiding wasting evaluations on models with lower performance. Furthermore, our user-friendly Python implementation produces confidence guarantees of correctly selecting the optimal model. 
We evaluate our algorithms by selecting between 8 target-dependent sentiment analysis methods using dramatically fewer model evaluations than current model selection approaches.", "doc_id": "180fbfb92cce987edc29809639e29c2e", "publication_year": 2019, "sentences": ["we present fiesta , a model selection approach that significantly reduces the computational resources required to reliably identify state - of - the - art performance from large collections of candidate models .", "despite being known to produce unreliable comparisons , it is still common practice to compare model evaluations based on single choices of random seeds .", "we show that reliable model selection also requires evaluations based on multiple train - test splits ( contrary to common practice in many shared tasks ) .", "using bandit theory from the statistics literature , we are able to adaptively determine appropriate numbers of data splits and random seeds used to evaluate each model , focusing computational resources on the evaluation of promising models whilst avoiding wasting evaluations on models with lower performance .", "furthermore , our user - friendly python implementation produces confidence guarantees of correctly selecting the optimal model .", "we evaluate our algorithms by selecting between 8 target - dependent sentiment analysis methods using dramatically fewer model evaluations than current model selection approaches ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "fiesta", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fiesta"], "offsets": [2]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "FAC", "arguments": [{"text": "fiesta", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["fiesta"], "offsets": [2]}, {"text": "computational resources required", "nugget_type": "FEA", "argument_type": "Object", 
"tokens": ["computational", "resources", "required"], "offsets": [12, 13, 14]}, {"text": "reliably identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reliably", "identify"], "offsets": [16, 17]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "unreliable comparisons", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unreliable", "comparisons"], "offsets": [38, 39]}, {"text": "single choices of random seeds", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["single", "choices", "of", "random", "seeds"], "offsets": [52, 53, 54, 55, 56]}, {"text": "compare", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["compare"], "offsets": [47]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [37]}}, {"event_type": "FAC", "arguments": [{"text": "evaluations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["evaluations"], "offsets": [66]}, {"text": "reliable model selection", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["reliable", "model", "selection"], "offsets": [61, 62, 63]}, {"text": "based on multiple train - test splits", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "multiple", "train", "-", "test", "splits"], "offsets": [67, 68, 69, 70, 71, 72, 73]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [93]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [109]}, {"text": "focusing computational resources on the evaluation of promising models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["focusing", "computational", "resources", "on", "the", "evaluation", "of", "promising", "models"], "offsets": [113, 114, 115, 116, 117, 118, 119, 
120, 121]}, {"text": "whilst avoiding wasting evaluations on models with lower performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["whilst", "avoiding", "wasting", "evaluations", "on", "models", "with", "lower", "performance"], "offsets": [122, 123, 124, 125, 126, 127, 128, 129, 130]}, {"text": "appropriate numbers of", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["appropriate", "numbers", "of"], "offsets": [99, 100, 101]}, {"text": "numbers of data splits", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["numbers", "of", "data", "splits"], "offsets": [100, 101, 102, 103]}, {"text": "numbers of random seeds", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["numbers", "of", "random", "seeds"], "offsets": [100, 101, 105, 106]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [98]}}, {"event_type": "PUR", "arguments": [{"text": "each model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["each", "model"], "offsets": [110, 111]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [109]}}, {"event_type": "FAC", "arguments": [{"text": "confidence", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["confidence"], "offsets": [141]}, {"text": "user - friendly python implementation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["user", "-", "friendly", "python", "implementation"], "offsets": [135, 136, 137, 138, 139]}, {"text": "guarantees of correctly selecting the optimal model", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["guarantees", "of", "correctly", "selecting", "the", "optimal", "model"], "offsets": [142, 143, 144, 145, 146, 147, 148]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [140]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [150]}, {"text": "fiesta", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["fiesta"], "offsets": [2]}, {"text": "8 target - dependent sentiment analysis methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["8", "target", "-", "dependent", "sentiment", "analysis", "methods"], "offsets": [157, 158, 159, 160, 161, 162, 163]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [151]}], "trigger": {"text": "selecting between", "tokens": ["selecting", "between"], "offsets": [155, 156]}}, {"event_type": "CMP", "arguments": [{"text": "fiesta", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fiesta"], "offsets": [2]}, {"text": "model evaluations", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["model", "evaluations"], "offsets": [167, 168]}, {"text": "current model selection approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "model", "selection", "approaches"], "offsets": [170, 171, 172, 173]}, {"text": "dramatically fewer", "nugget_type": "STR", "argument_type": "Result", "tokens": ["dramatically", "fewer"], "offsets": [165, 166]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [164]}}, {"event_type": "PUR", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [18, 19, 20, 21, 22, 23, 24, 25]}, {"text": "from large collections of candidate models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "large", "collections", "of", "candidate", "models"], "offsets": [26, 27, 28, 29, 30, 31]}], "trigger": {"text": "reliably identify", "tokens": ["reliably", "identify"], "offsets": [16, 17]}}, {"event_type": "PUR", "arguments": [{"text": "model evaluations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["model", "evaluations"], "offsets": [48, 49]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [47]}}, 
{"event_type": "PUR", "arguments": [{"text": "fiesta", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["fiesta"], "offsets": [2]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [151]}}], "document": ["we", "present", "fiesta", ",", "a", "model", "selection", "approach", "that", "significantly", "reduces", "the", "computational", "resources", "required", "to", "reliably", "identify", "state", "-", "of", "-", "the", "-", "art", "performance", "from", "large", "collections", "of", "candidate", "models", ".", "despite", "being", "known", "to", "produce", "unreliable", "comparisons", ",", "it", "is", "still", "common", "practice", "to", "compare", "model", "evaluations", "based", "on", "single", "choices", "of", "random", "seeds", ".", "we", "show", "that", "reliable", "model", "selection", "also", "requires", "evaluations", "based", "on", "multiple", "train", "-", "test", "splits", "(", "contrary", "to", "common", "practice", "in", "many", "shared", "tasks", ")", ".", "using", "bandit", "theory", "from", "the", "statistics", "literature", ",", "we", "are", "able", "to", "adaptively", "determine", "appropriate", "numbers", "of", "data", "splits", "and", "random", "seeds", "used", "to", "evaluate", "each", "model", ",", "focusing", "computational", "resources", "on", "the", "evaluation", "of", "promising", "models", "whilst", "avoiding", "wasting", "evaluations", "on", "models", "with", "lower", "performance", ".", "furthermore", ",", "our", "user", "-", "friendly", "python", "implementation", "produces", "confidence", "guarantees", "of", "correctly", "selecting", "the", "optimal", "model", ".", "we", "evaluate", "our", "algorithms", "by", "selecting", "between", "8", "target", "-", "dependent", "sentiment", "analysis", "methods", "using", "dramatically", "fewer", "model", "evaluations", "than", "current", "model", "selection", "approaches", "."]}, {"venue": "ACL", "title": "Cross-replication Reliability - An Empirical Approach to 
Interpreting Inter-rater Reliability", "abstract": "When collecting annotations and labeled data from humans, a standard practice is to use inter-rater reliability (IRR) as a measure of data goodness (Hallgren, 2012). Metrics such as Krippendorff\u2019s alpha or Cohen\u2019s kappa are typically required to be above a threshold of 0.6 (Landis and Koch, 1977). These absolute thresholds are unreasonable for crowdsourced data from annotators with high cultural and training variances, especially on subjective topics. We present a new alternative to interpreting IRR that is more empirical and contextualized. It is based upon benchmarking IRR against baseline measures in a replication, one of which is a novel cross-replication reliability (xRR) measure based on Cohen\u2019s (1960) kappa. We call this approach the xRR framework. We opensource a replication dataset of 4 million human judgements of facial expressions and analyze it with the proposed framework. We argue this framework can be used to measure the quality of crowdsourced datasets.", "doc_id": "f500e30b1dd8d82a97fcce9fd26bcd74", "publication_year": 2021, "sentences": ["when collecting annotations and labeled data from humans , a standard practice is to use inter - rater reliability ( irr ) as a measure of data goodness ( hallgren , 2012 ) .", "metrics such as krippendorff \u2019 s alpha or cohen \u2019 s kappa are typically required to be above a threshold of 0 . 
6 ( landis and koch , 1977 ) .", "these absolute thresholds are unreasonable for crowdsourced data from annotators with high cultural and training variances , especially on subjective topics .", "we present a new alternative to interpreting irr that is more empirical and contextualized .", "it is based upon benchmarking irr against baseline measures in a replication , one of which is a novel cross - replication reliability ( xrr ) measure based on cohen \u2019 s ( 1960 ) kappa .", "we call this approach the xrr framework .", "we opensource a replication dataset of 4 million human judgements of facial expressions and analyze it with the proposed framework .", "we argue this framework can be used to measure the quality of crowdsourced datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "inter - rater reliability", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inter", "-", "rater", "reliability"], "offsets": [15, 16, 17, 18]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [14]}}, {"event_type": "RWF", "arguments": [{"text": "unreasonable", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unreasonable"], "offsets": [70]}, {"text": "metrics such as krippendorff \u2019 s alpha or cohen \u2019 s kappa", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["metrics", "such", "as", "krippendorff", "\u2019", "s", "alpha", "or", "cohen", "\u2019", "s", "kappa"], "offsets": [34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45]}], "trigger": {"text": "unreasonable", "tokens": ["unreasonable"], "offsets": [70]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [88]}, {"text": "alternative", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alternative"], "offsets": [92]}, {"text": "interpreting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["interpreting"], "offsets": [94]}], "trigger": {"text": "present", 
"tokens": ["present"], "offsets": [89]}}, {"event_type": "PUR", "arguments": [{"text": "inter - rater reliability", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["inter", "-", "rater", "reliability"], "offsets": [15, 16, 17, 18]}], "trigger": {"text": "interpreting", "tokens": ["interpreting"], "offsets": [94]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [148]}, {"text": "replication dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["replication", "dataset"], "offsets": [151, 152]}], "trigger": {"text": "opensource", "tokens": ["opensource"], "offsets": [149]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [169]}, {"text": "measure", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["measure"], "offsets": [177]}], "trigger": {"text": "argue", "tokens": ["argue"], "offsets": [170]}}, {"event_type": "FAC", "arguments": [{"text": "quality of crowdsourced datasets", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["quality", "of", "crowdsourced", "datasets"], "offsets": [179, 180, 181, 182]}, {"text": "xrr framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cross", "-", "replication", "reliability", "framework"], "offsets": [122, 123, 124, 125, 146]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [177]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [148]}, {"text": "replication dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["replication", "dataset"], "offsets": [151, 152]}, {"text": "with the proposed framework", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "proposed", "framework"], "offsets": [164, 165, 166, 167]}], "trigger": {"text": "analyze", "tokens": 
["analyze"], "offsets": [162]}}], "document": ["when", "collecting", "annotations", "and", "labeled", "data", "from", "humans", ",", "a", "standard", "practice", "is", "to", "use", "inter", "-", "rater", "reliability", "(", "irr", ")", "as", "a", "measure", "of", "data", "goodness", "(", "hallgren", ",", "2012", ")", ".", "metrics", "such", "as", "krippendorff", "\u2019", "s", "alpha", "or", "cohen", "\u2019", "s", "kappa", "are", "typically", "required", "to", "be", "above", "a", "threshold", "of", "0", ".", "6", "(", "landis", "and", "koch", ",", "1977", ")", ".", "these", "absolute", "thresholds", "are", "unreasonable", "for", "crowdsourced", "data", "from", "annotators", "with", "high", "cultural", "and", "training", "variances", ",", "especially", "on", "subjective", "topics", ".", "we", "present", "a", "new", "alternative", "to", "interpreting", "irr", "that", "is", "more", "empirical", "and", "contextualized", ".", "it", "is", "based", "upon", "benchmarking", "irr", "against", "baseline", "measures", "in", "a", "replication", ",", "one", "of", "which", "is", "a", "novel", "cross", "-", "replication", "reliability", "(", "xrr", ")", "measure", "based", "on", "cohen", "\u2019", "s", "(", "1960", ")", "kappa", ".", "we", "call", "this", "approach", "the", "xrr", "framework", ".", "we", "opensource", "a", "replication", "dataset", "of", "4", "million", "human", "judgements", "of", "facial", "expressions", "and", "analyze", "it", "with", "the", "proposed", "framework", ".", "we", "argue", "this", "framework", "can", "be", "used", "to", "measure", "the", "quality", "of", "crowdsourced", "datasets", "."]}, {"venue": "ACL", "title": "Making Pre-trained Language Models Better Few-shot Learners", "abstract": "The recent GPT-3 model (Brown et al., 2020) achieves remarkable few-shot performance solely by leveraging a natural-language prompt and a few task demonstrations as input context. 
Inspired by their findings, we study few-shot learning in a more practical scenario, where we use smaller language models for which fine-tuning is computationally efficient. We present LM-BFF\u2014better few-shot fine-tuning of language models\u2014a suite of simple and complementary techniques for fine-tuning language models on a small number of annotated examples. Our approach includes (1) prompt-based fine-tuning together with a novel pipeline for automating prompt generation; and (2) a refined strategy for dynamically and selectively incorporating demonstrations into each context. Finally, we present a systematic evaluation for analyzing few-shot performance on a range of NLP tasks, including classification and regression. Our experiments demonstrate that our methods combine to dramatically outperform standard fine-tuning procedures in this low resource setting, achieving up to 30% absolute improvement, and 11% on average across all tasks. Our approach makes minimal assumptions on task resources and domain expertise, and hence constitutes a strong task-agnostic method for few-shot learning.", "doc_id": "fd4526d8ccd6b35ae6a285c2e34721f3", "publication_year": 2021, "sentences": ["the recent gpt - 3 model ( brown et al . 
, 2020 ) achieves remarkable few - shot performance solely by leveraging a natural - language prompt and a few task demonstrations as input context .", "inspired by their findings , we study few - shot learning in a more practical scenario , where we use smaller language models for which fine - tuning is computationally efficient .", "we present lm - bff \u2014 better few - shot fine - tuning of language models \u2014 a suite of simple and complementary techniques for fine - tuning language models on a small number of annotated examples .", "our approach includes ( 1 ) prompt - based fine - tuning together with a novel pipeline for automating prompt generation ; and ( 2 ) a refined strategy for dynamically and selectively incorporating demonstrations into each context .", "finally , we present a systematic evaluation for analyzing few - shot performance on a range of nlp tasks , including classification and regression .", "our experiments demonstrate that our methods combine to dramatically outperform standard fine - tuning procedures in this low resource setting , achieving up to 30 % absolute improvement , and 11 % on average across all tasks .", "our approach makes minimal assumptions on task resources and domain expertise , and hence constitutes a strong task - agnostic method for few - shot learning ."], "events": [{"event_type": "RWS", "arguments": [{"text": "gpt - 3 model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["gpt", "-", "3", "model"], "offsets": [2, 3, 4, 5]}, {"text": "natural - language prompt and a few task demonstrations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["natural", "-", "language", "prompt", "and", "a", "few", "task", "demonstrations"], "offsets": [24, 25, 26, 27, 28, 29, 30, 31, 32]}, {"text": "achieves", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["achieves"], "offsets": [14]}, {"text": "input context", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["input", 
"context"], "offsets": [34, 35]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "remarkable few - shot performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["remarkable", "few", "-", "shot", "performance"], "offsets": [15, 16, 17, 18, 19]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [42]}, {"text": "few - shot learning", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["few", "-", "shot", "learning"], "offsets": [44, 45, 46, 47]}, {"text": "in a more practical scenario", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "more", "practical", "scenario"], "offsets": [48, 49, 50, 51, 52]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [69]}, {"text": "fine - tuning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fine", "-", "tuning"], "offsets": [94, 95, 96]}, {"text": "lm - bff \u2014 better few - shot fine - tuning of language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [70]}}, {"event_type": "PUR", "arguments": [{"text": "language models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["language", "models"], "offsets": [97, 98]}, {"text": "on a small number of annotated examples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "small", "number", "of", "annotated", "examples"], "offsets": [99, 100, 101, 
102, 103, 104, 105]}], "trigger": {"text": "fine - tuning", "tokens": ["fine", "-", "tuning"], "offsets": [94, 95, 96]}}, {"event_type": "PUR", "arguments": [{"text": "prompt generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["prompt", "generation"], "offsets": [126, 127]}], "trigger": {"text": "automating", "tokens": ["automating"], "offsets": [125]}}, {"event_type": "PUR", "arguments": [{"text": "demonstrations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["demonstrations"], "offsets": [141]}, {"text": "into each context", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["into", "each", "context"], "offsets": [142, 143, 144]}], "trigger": {"text": "dynamically and selectively incorporating", "tokens": ["dynamically", "and", "selectively", "incorporating"], "offsets": [137, 138, 139, 140]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [148]}, {"text": "systematic evaluation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["systematic", "evaluation"], "offsets": [151, 152]}, {"text": "analyzing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analyzing"], "offsets": [154]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [149]}}, {"event_type": "PUR", "arguments": [{"text": "few - shot performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["few", "-", "shot", "performance"], "offsets": [155, 156, 157, 158]}, {"text": "on a range of nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "range", "of", "nlp", "tasks"], "offsets": [159, 160, 161, 162, 163, 164]}], "trigger": {"text": "analyzing", "tokens": ["analyzing"], "offsets": [154]}}, {"event_type": "CMP", "arguments": [{"text": "standard fine - tuning procedures", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "fine", "-", "tuning", "procedures"], "offsets": [181, 
182, 183, 184, 185]}, {"text": "in this low resource setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "this", "low", "resource", "setting"], "offsets": [186, 187, 188, 189, 190]}, {"text": "lm - bff \u2014 better few - shot fine - tuning of language models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "dramatically outperform", "tokens": ["dramatically", "outperform"], "offsets": [179, 180]}}, {"event_type": "FAC", "arguments": [{"text": "30 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["30", "%"], "offsets": [195, 196]}, {"text": "lm - bff \u2014 better few - shot fine - tuning of language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84]}, {"text": "absolute improvement", "nugget_type": "STR", "argument_type": "Object", "tokens": ["absolute", "improvement"], "offsets": [197, 198]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [192]}}, {"event_type": "FAC", "arguments": [{"text": "minimal assumptions", "nugget_type": "APP", "argument_type": "Object", "tokens": ["minimal", "assumptions"], "offsets": [212, 213]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [210]}, {"text": "task resources and domain expertise", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "resources", "and", "domain", "expertise"], "offsets": [215, 216, 217, 218, 219]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [211]}}, {"event_type": "FAC", "arguments": [{"text": "few - shot learning", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["few", "-", "shot", "learning"], "offsets": [231, 232, 233, 234]}, {"text": "strong task - agnostic method", "nugget_type": "APP", "argument_type": "Object", "tokens": ["strong", "task", "-", "agnostic", "method"], "offsets": [225, 226, 227, 228, 229]}, {"text": "lm - bff \u2014 better few - shot fine - tuning of language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84]}], "trigger": {"text": "constitutes", "tokens": ["constitutes"], "offsets": [223]}}, {"event_type": "WKS", "arguments": [{"text": "prompt - based fine - tuning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["prompt", "-", "based", "fine", "-", "tuning"], "offsets": [113, 114, 115, 116, 117, 118]}, {"text": "pipeline", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pipeline"], "offsets": [123]}, {"text": "automating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automating"], "offsets": [125]}, {"text": "refined strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["refined", "strategy"], "offsets": [134, 135]}, {"text": "dynamically and selectively incorporating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["dynamically", "and", "selectively", "incorporating"], "offsets": [137, 138, 139, 140]}], "trigger": {"text": "includes", "tokens": ["includes"], "offsets": [109]}}, {"event_type": "FIN", "arguments": [{"text": "dramatically outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["dramatically", "outperform"], "offsets": [179, 180]}, {"text": "achieving", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieving"], "offsets": [192]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [173]}}, {"event_type": "FAC", 
"arguments": [{"text": "lm - bff \u2014 better few - shot fine - tuning of language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84]}, {"text": "11 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["11", "%"], "offsets": [201, 202]}, {"text": "average", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["average"], "offsets": [204]}, {"text": "across all tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "all", "tasks"], "offsets": [205, 206, 207]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [192]}}], "document": ["the", "recent", "gpt", "-", "3", "model", "(", "brown", "et", "al", ".", ",", "2020", ")", "achieves", "remarkable", "few", "-", "shot", "performance", "solely", "by", "leveraging", "a", "natural", "-", "language", "prompt", "and", "a", "few", "task", "demonstrations", "as", "input", "context", ".", "inspired", "by", "their", "findings", ",", "we", "study", "few", "-", "shot", "learning", "in", "a", "more", "practical", "scenario", ",", "where", "we", "use", "smaller", "language", "models", "for", "which", "fine", "-", "tuning", "is", "computationally", "efficient", ".", "we", "present", "lm", "-", "bff", "\u2014", "better", "few", "-", "shot", "fine", "-", "tuning", "of", "language", "models", "\u2014", "a", "suite", "of", "simple", "and", "complementary", "techniques", "for", "fine", "-", "tuning", "language", "models", "on", "a", "small", "number", "of", "annotated", "examples", ".", "our", "approach", "includes", "(", "1", ")", "prompt", "-", "based", "fine", "-", "tuning", "together", "with", "a", "novel", "pipeline", "for", "automating", "prompt", "generation", ";", "and", "(", "2", ")", "a", "refined", "strategy", "for", "dynamically", "and", "selectively", "incorporating", 
"demonstrations", "into", "each", "context", ".", "finally", ",", "we", "present", "a", "systematic", "evaluation", "for", "analyzing", "few", "-", "shot", "performance", "on", "a", "range", "of", "nlp", "tasks", ",", "including", "classification", "and", "regression", ".", "our", "experiments", "demonstrate", "that", "our", "methods", "combine", "to", "dramatically", "outperform", "standard", "fine", "-", "tuning", "procedures", "in", "this", "low", "resource", "setting", ",", "achieving", "up", "to", "30", "%", "absolute", "improvement", ",", "and", "11", "%", "on", "average", "across", "all", "tasks", ".", "our", "approach", "makes", "minimal", "assumptions", "on", "task", "resources", "and", "domain", "expertise", ",", "and", "hence", "constitutes", "a", "strong", "task", "-", "agnostic", "method", "for", "few", "-", "shot", "learning", "."]}, {"venue": "ACL", "title": "Line Graph Enhanced AMR-to-Text Generation with Mix-Order Graph Attention Networks", "abstract": "Efficient structure encoding for graphs with labeled edges is an important yet challenging point in many graph-based models. This work focuses on AMR-to-text generation \u2013 A graph-to-sequence task aiming to recover natural language from Abstract Meaning Representations (AMR). Existing graph-to-sequence approaches generally utilize graph neural networks as their encoders, which have two limitations: 1) The message propagation process in AMR graphs is only guided by the first-order adjacency information. 2) The relationships between labeled edges are not fully considered. In this work, we propose a novel graph encoding framework which can effectively explore the edge relations. We also adopt graph attention networks with higher-order neighborhood information to encode the rich structure in AMR graphs. Experiment results show that our approach obtains new state-of-the-art performance on English AMR benchmark datasets. 
The ablation analyses also demonstrate that both edge relations and higher-order information are beneficial to graph-to-sequence modeling.", "doc_id": "0a5d76746b1467c01847f0d7c78f75f0", "publication_year": 2020, "sentences": ["efficient structure encoding for graphs with labeled edges is an important yet challenging point in many graph - based models .", "this work focuses on amr - to - text generation \u2013 a graph - to - sequence task aiming to recover natural language from abstract meaning representations ( amr ) .", "existing graph - to - sequence approaches generally utilize graph neural networks as their encoders , which have two limitations : 1 ) the message propagation process in amr graphs is only guided by the first - order adjacency information .", "2 ) the relationships between labeled edges are not fully considered .", "in this work , we propose a novel graph encoding framework which can effectively explore the edge relations .", "we also adopt graph attention networks with higher - order neighborhood information to encode the rich structure in amr graphs .", "experiment results show that our approach obtains new state - of - the - art performance on english amr benchmark datasets .", "the ablation analyses also demonstrate that both edge relations and higher - order information are beneficial to graph - to - sequence modeling ."], "events": [{"event_type": "RWS", "arguments": [{"text": "graph - to - sequence approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["graph", "-", "to", "-", "sequence", "approaches"], "offsets": [53, 54, 55, 56, 57, 58]}, {"text": "graph neural networks", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["graph", "neural", "networks"], "offsets": [61, 62, 63]}, {"text": "encoders", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["encoders"], "offsets": [66]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [60]}}, {"event_type": "RWF", "arguments": 
[{"text": "message propagation process", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["message", "propagation", "process"], "offsets": [76, 77, 78]}, {"text": "in amr graphs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "amr", "graphs"], "offsets": [79, 80, 81]}, {"text": "only", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["only"], "offsets": [83]}, {"text": "first - order adjacency information", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["first", "-", "order", "adjacency", "information"], "offsets": [87, 88, 89, 90, 91]}], "trigger": {"text": "guided", "tokens": ["guided"], "offsets": [84]}}, {"event_type": "RWF", "arguments": [{"text": "not fully", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["not", "fully"], "offsets": [101, 102]}, {"text": "graph - to - sequence approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["graph", "-", "to", "-", "sequence", "approaches"], "offsets": [53, 54, 55, 56, 57, 58]}, {"text": "relationships between labeled edges", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["relationships", "between", "labeled", "edges"], "offsets": [96, 97, 98, 99]}], "trigger": {"text": "considered", "tokens": ["considered"], "offsets": [103]}}, {"event_type": "PRP", "arguments": [{"text": "explore", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["explore"], "offsets": [119]}, {"text": "graph encoding framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graph", "encoding", "framework"], "offsets": [113, 114, 115]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "edge relations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["edge", "relations"], "offsets": [121, 122]}], "trigger": {"text": "explore", "tokens": 
["explore"], "offsets": [119]}}, {"event_type": "MDS", "arguments": [{"text": "encode", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encode"], "offsets": [137]}, {"text": "graph attention networks", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["graph", "attention", "networks"], "offsets": [127, 128, 129]}, {"text": "higher - order neighborhood information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["higher", "-", "order", "neighborhood", "information"], "offsets": [131, 132, 133, 134, 135]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [126]}}, {"event_type": "PUR", "arguments": [{"text": "rich structure", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["rich", "structure"], "offsets": [139, 140]}, {"text": "in amr graphs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "amr", "graphs"], "offsets": [141, 142, 143]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [137]}}, {"event_type": "FIN", "arguments": [{"text": "obtains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtains"], "offsets": [151]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [147]}}, {"event_type": "FAC", "arguments": [{"text": "english amr benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["english", "amr", "benchmark", "datasets"], "offsets": [162, 163, 164, 165]}, {"text": "graph encoding framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["graph", "encoding", "framework"], "offsets": [113, 114, 115]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [151]}}, {"event_type": "FIN", "arguments": [{"text": "beneficial", "nugget_type": "E-FAC", 
"argument_type": "Content", "tokens": ["beneficial"], "offsets": [182]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [171]}}, {"event_type": "FAC", "arguments": [{"text": "edge relations", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["edge", "relations"], "offsets": [174, 175]}, {"text": "higher - order information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["higher", "-", "order", "information"], "offsets": [177, 178, 179, 180]}, {"text": "graph - to - sequence modeling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["graph", "-", "to", "-", "sequence", "modeling"], "offsets": [184, 185, 186, 187, 188, 189]}], "trigger": {"text": "beneficial", "tokens": ["beneficial"], "offsets": [182]}}, {"event_type": "ITT", "arguments": [{"text": "amr - to - text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["amr", "-", "to", "-", "text", "generation"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "focuses", "tokens": ["focuses"], "offsets": [23]}}], "document": ["efficient", "structure", "encoding", "for", "graphs", "with", "labeled", "edges", "is", "an", "important", "yet", "challenging", "point", "in", "many", "graph", "-", "based", "models", ".", "this", "work", "focuses", "on", "amr", "-", "to", "-", "text", "generation", "\u2013", "a", "graph", "-", "to", "-", "sequence", "task", "aiming", "to", "recover", "natural", "language", "from", "abstract", "meaning", "representations", "(", "amr", ")", ".", "existing", "graph", "-", "to", "-", "sequence", "approaches", "generally", "utilize", "graph", "neural", "networks", "as", "their", "encoders", ",", "which", "have", "two", "limitations", ":", "1", ")", "the", "message", "propagation", "process", "in", "amr", "graphs", "is", "only", "guided", "by", "the", "first", "-", "order", "adjacency", "information", ".", "2", ")", "the", "relationships", "between", "labeled", "edges", "are", "not", "fully", 
"considered", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "graph", "encoding", "framework", "which", "can", "effectively", "explore", "the", "edge", "relations", ".", "we", "also", "adopt", "graph", "attention", "networks", "with", "higher", "-", "order", "neighborhood", "information", "to", "encode", "the", "rich", "structure", "in", "amr", "graphs", ".", "experiment", "results", "show", "that", "our", "approach", "obtains", "new", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "english", "amr", "benchmark", "datasets", ".", "the", "ablation", "analyses", "also", "demonstrate", "that", "both", "edge", "relations", "and", "higher", "-", "order", "information", "are", "beneficial", "to", "graph", "-", "to", "-", "sequence", "modeling", "."]}, {"venue": "ACL", "title": "Modeling Fine-Grained Entity Types with Box Embeddings", "abstract": "Neural entity typing models typically represent fine-grained entity types as vectors in a high-dimensional space, but such spaces are not well-suited to modeling these types\u2019 complex interdependencies. We study the ability of box embeddings, which embed concepts as d-dimensional hyperrectangles, to capture hierarchies of types even when these relationships are not defined explicitly in the ontology. Our model represents both types and entity mentions as boxes. Each mention and its context are fed into a BERT-based model to embed that mention in our box space; essentially, this model leverages typological clues present in the surface text to hypothesize a type representation for the mention. Box containment can then be used to derive both the posterior probability of a mention exhibiting a given type and the conditional probability relations between types themselves. We compare our approach with a vector-based typing model and observe state-of-the-art performance on several entity typing benchmarks. 
In addition to competitive typing performance, our box-based model shows better performance in prediction consistency (predicting a supertype and a subtype together) and confidence (i.e., calibration), demonstrating that the box-based model captures the latent type hierarchies better than the vector-based model does.", "doc_id": "726bcb78bb2e2b8d960b0c763cd93cff", "publication_year": 2021, "sentences": ["neural entity typing models typically represent fine - grained entity types as vectors in a high - dimensional space , but such spaces are not well - suited to modeling these types \u2019 complex interdependencies .", "we study the ability of box embeddings , which embed concepts as d - dimensional hyperrectangles , to capture hierarchies of types even when these relationships are not defined explicitly in the ontology .", "our model represents both types and entity mentions as boxes .", "each mention and its context are fed into a bert - based model to embed that mention in our box space ; essentially , this model leverages typological clues present in the surface text to hypothesize a type representation for the mention .", "box containment can then be used to derive both the posterior probability of a mention exhibiting a given type and the conditional probability relations between types themselves .", "we compare our approach with a vector - based typing model and observe state - of - the - art performance on several entity typing benchmarks .", "in addition to competitive typing performance , our box - based model shows better performance in prediction consistency ( predicting a supertype and a subtype together ) and confidence ( i . e . 
, calibration ) , demonstrating that the box - based model captures the latent type hierarchies better than the vector - based model does ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural entity typing models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "entity", "typing", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [5]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [36]}, {"text": "ability of box embeddings", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ability", "of", "box", "embeddings"], "offsets": [39, 40, 41, 42]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [54]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [37]}}, {"event_type": "PUR", "arguments": [{"text": "hierarchies of types", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["hierarchies", "of", "types"], "offsets": [55, 56, 57]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [54]}}, {"event_type": "MDS", "arguments": [{"text": "bert - based model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bert", "-", "based", "model"], "offsets": [90, 91, 92, 93]}, {"text": "each mention", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["each", "mention"], "offsets": [81, 82]}, {"text": "context", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["context"], "offsets": [85]}, {"text": "embed", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["embed"], "offsets": [95]}], "trigger": {"text": "fed", "tokens": ["fed"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "mention in our box space", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["mention", "in", "our", "box", "space"], "offsets": [97, 98, 99, 
100, 101]}], "trigger": {"text": "embed", "tokens": ["embed"], "offsets": [95]}}, {"event_type": "WKS", "arguments": [{"text": "typological clues present in the surface text", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["typological", "clues", "present", "in", "the", "surface", "text"], "offsets": [108, 109, 110, 111, 112, 113, 114]}, {"text": "hypothesize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["hypothesize"], "offsets": [116]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [107]}}, {"event_type": "PUR", "arguments": [{"text": "type", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["type"], "offsets": [118]}], "trigger": {"text": "hypothesize", "tokens": ["hypothesize"], "offsets": [116]}}, {"event_type": "WKS", "arguments": [{"text": "box containment", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["box", "containment"], "offsets": [124, 125]}, {"text": "derive", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["derive"], "offsets": [131]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [129]}}, {"event_type": "PUR", "arguments": [{"text": "posterior probability of a mention exhibiting a given type and the conditional probability relations between types themselves", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["posterior", "probability", "of", "a", "mention", "exhibiting", "a", "given", "type", "and", "the", "conditional", "probability", "relations", "between", "types", "themselves"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150]}], "trigger": {"text": "derive", "tokens": ["derive"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "our approach with a vector - based typing model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["our", "approach", "with", 
"a", "vector", "-", "based", "typing", "model"], "offsets": [154, 155, 156, 157, 158, 159, 160, 161, 162]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [153]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [165, 166, 167, 168, 169, 170, 171, 172]}, {"text": "on several entity typing benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "entity", "typing", "benchmarks"], "offsets": [173, 174, 175, 176, 177]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [164]}}, {"event_type": "FAC", "arguments": [{"text": "box - based model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["box", "-", "based", "model"], "offsets": [187, 188, 189, 190]}, {"text": "better", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["better"], "offsets": [192]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance"], "offsets": [193]}, {"text": "prediction consistency", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prediction", "consistency"], "offsets": [195, 196]}, {"text": "confidence", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["confidence"], "offsets": [207]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [191]}}, {"event_type": "FIN", "arguments": [{"text": "captures", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["captures"], "offsets": [224]}], "trigger": {"text": "demonstrating", "tokens": ["demonstrating"], "offsets": [217]}}, {"event_type": "CMP", "arguments": [{"text": "box - based model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["box", "-", "based", "model"], "offsets": [220, 221, 222, 
223]}, {"text": "vector - based model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["vector", "-", "based", "model"], "offsets": [232, 233, 234, 235]}, {"text": "latent type hierarchies", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["latent", "type", "hierarchies"], "offsets": [226, 227, 228]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [229]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [224]}}], "document": ["neural", "entity", "typing", "models", "typically", "represent", "fine", "-", "grained", "entity", "types", "as", "vectors", "in", "a", "high", "-", "dimensional", "space", ",", "but", "such", "spaces", "are", "not", "well", "-", "suited", "to", "modeling", "these", "types", "\u2019", "complex", "interdependencies", ".", "we", "study", "the", "ability", "of", "box", "embeddings", ",", "which", "embed", "concepts", "as", "d", "-", "dimensional", "hyperrectangles", ",", "to", "capture", "hierarchies", "of", "types", "even", "when", "these", "relationships", "are", "not", "defined", "explicitly", "in", "the", "ontology", ".", "our", "model", "represents", "both", "types", "and", "entity", "mentions", "as", "boxes", ".", "each", "mention", "and", "its", "context", "are", "fed", "into", "a", "bert", "-", "based", "model", "to", "embed", "that", "mention", "in", "our", "box", "space", ";", "essentially", ",", "this", "model", "leverages", "typological", "clues", "present", "in", "the", "surface", "text", "to", "hypothesize", "a", "type", "representation", "for", "the", "mention", ".", "box", "containment", "can", "then", "be", "used", "to", "derive", "both", "the", "posterior", "probability", "of", "a", "mention", "exhibiting", "a", "given", "type", "and", "the", "conditional", "probability", "relations", "between", "types", "themselves", ".", "we", "compare", "our", "approach", "with", "a", "vector", "-", "based", "typing", "model", "and", "observe", 
"state", "-", "of", "-", "the", "-", "art", "performance", "on", "several", "entity", "typing", "benchmarks", ".", "in", "addition", "to", "competitive", "typing", "performance", ",", "our", "box", "-", "based", "model", "shows", "better", "performance", "in", "prediction", "consistency", "(", "predicting", "a", "supertype", "and", "a", "subtype", "together", ")", "and", "confidence", "(", "i", ".", "e", ".", ",", "calibration", ")", ",", "demonstrating", "that", "the", "box", "-", "based", "model", "captures", "the", "latent", "type", "hierarchies", "better", "than", "the", "vector", "-", "based", "model", "does", "."]}, {"venue": "ACL", "title": "Overcoming Catastrophic Forgetting beyond Continual Learning: Balanced Training for Neural Machine Translation", "abstract": "Neural networks tend to gradually forget the previously learned knowledge when learning multiple tasks sequentially from dynamic data distributions. This problem is called catastrophic forgetting, which is a fundamental challenge in the continual learning of neural networks. In this work, we observe that catastrophic forgetting not only occurs in continual learning but also affects the traditional static training. Neural networks, especially neural machine translation models, suffer from catastrophic forgetting even if they learn from a static training set. To be specific, the final model pays imbalanced attention to training samples, where recently exposed samples attract more attention than earlier samples. The underlying cause is that training samples do not get balanced training in each model update, so we name this problem imbalanced training. To alleviate this problem, we propose Complementary Online Knowledge Distillation (COKD), which uses dynamically updated teacher models trained on specific data orders to iteratively provide complementary knowledge to the student model. 
Experimental results on multiple machine translation tasks show that our method successfully alleviates the problem of imbalanced training and achieves substantial improvements over strong baseline systems.", "doc_id": "dcf75583d2436545e17a53a42db40400", "publication_year": 2022, "sentences": ["neural networks tend to gradually forget the previously learned knowledge when learning multiple tasks sequentially from dynamic data distributions .", "this problem is called catastrophic forgetting , which is a fundamental challenge in the continual learning of neural networks .", "in this work , we observe that catastrophic forgetting not only occurs in continual learning but also affects the traditional static training .", "neural networks , especially neural machine translation models , suffer from catastrophic forgetting even if they learn from a static training set .", "to be specific , the final model pays imbalanced attention to training samples , where recently exposed samples attract more attention than earlier samples .", "the underlying cause is that training samples do not get balanced training in each model update , so we name this problem imbalanced training .", "to alleviate this problem , we propose complementary online knowledge distillation ( cokd ) , which uses dynamically updated teacher models trained on specific data orders to iteratively provide complementary knowledge to the student model .", "experimental results on multiple machine translation tasks show that our method successfully alleviates the problem of imbalanced training and achieves substantial improvements over strong baseline systems ."], "events": [{"event_type": "RWF", "arguments": [{"text": "neural networks", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "networks"], "offsets": [0, 1]}, {"text": "gradually forget", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["gradually", "forget"], "offsets": [4, 5]}], "trigger": {"text": "gradually forget", 
"tokens": ["gradually", "forget"], "offsets": [4, 5]}}, {"event_type": "FAC", "arguments": [{"text": "catastrophic forgetting", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["catastrophic", "forgetting"], "offsets": [47, 48]}, {"text": "continual learning", "nugget_type": "APP", "argument_type": "Object", "tokens": ["continual", "learning"], "offsets": [53, 54]}], "trigger": {"text": "occurs", "tokens": ["occurs"], "offsets": [51]}}, {"event_type": "RWF", "arguments": [{"text": "neural machine translation models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "machine", "translation", "models"], "offsets": [67, 68, 69, 70]}, {"text": "suffer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer"], "offsets": [72]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [72]}}, {"event_type": "RWF", "arguments": [{"text": "final model", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["final", "model"], "offsets": [91, 92]}, {"text": "imbalanced attention", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["imbalanced", "attention"], "offsets": [94, 95]}, {"text": "training samples", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["training", "samples"], "offsets": [97, 98]}], "trigger": {"text": "pays", "tokens": ["pays"], "offsets": [93]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, {"text": "complementary online knowledge distillation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["complementary", "online", "knowledge", "distillation"], "offsets": [143, 144, 145, 146]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [142]}}, {"event_type": "MDS", "arguments": [{"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [164]}, {"text": "dynamically updated teacher models", "nugget_type": "APP", 
"argument_type": "TriedComponent", "tokens": ["dynamically", "updated", "teacher", "models"], "offsets": [153, 154, 155, 156]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [152]}}, {"event_type": "PUR", "arguments": [{"text": "complementary knowledge", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["complementary", "knowledge"], "offsets": [165, 166]}, {"text": "to the student model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "student", "model"], "offsets": [167, 168, 169, 170]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [164]}}, {"event_type": "FIN", "arguments": [{"text": "successfully alleviates", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["successfully", "alleviates"], "offsets": [183, 184]}, {"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [191]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [179]}}, {"event_type": "FAC", "arguments": [{"text": "complementary online knowledge distillation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["complementary", "online", "knowledge", "distillation"], "offsets": [143, 144, 145, 146]}, {"text": "imbalanced training", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["imbalanced", "training"], "offsets": [188, 189]}], "trigger": {"text": "successfully alleviates", "tokens": ["successfully", "alleviates"], "offsets": [183, 184]}}, {"event_type": "RWF", "arguments": [{"text": "catastrophic forgetting", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["catastrophic", "forgetting"], "offsets": [47, 48]}, {"text": "traditional static training", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["traditional", "static", "training"], "offsets": [59, 60, 61]}], "trigger": {"text": "affects", "tokens": ["affects"], "offsets": [57]}}, {"event_type": "RWF", "arguments": [{"text": "training samples", "nugget_type": 
"FEA", "argument_type": "Concern", "tokens": ["training", "samples"], "offsets": [116, 117]}, {"text": "not get", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "get"], "offsets": [119, 120]}, {"text": "balanced training", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["balanced", "training"], "offsets": [121, 122]}], "trigger": {"text": "not get", "tokens": ["not", "get"], "offsets": [119, 120]}}, {"event_type": "FAC", "arguments": [{"text": "complementary online knowledge distillation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["complementary", "online", "knowledge", "distillation"], "offsets": [143, 144, 145, 146]}, {"text": "substantial improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["substantial", "improvements"], "offsets": [192, 193]}, {"text": "over strong baseline systems", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "strong", "baseline", "systems"], "offsets": [194, 195, 196, 197]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [191]}}], "document": ["neural", "networks", "tend", "to", "gradually", "forget", "the", "previously", "learned", "knowledge", "when", "learning", "multiple", "tasks", "sequentially", "from", "dynamic", "data", "distributions", ".", "this", "problem", "is", "called", "catastrophic", "forgetting", ",", "which", "is", "a", "fundamental", "challenge", "in", "the", "continual", "learning", "of", "neural", "networks", ".", "in", "this", "work", ",", "we", "observe", "that", "catastrophic", "forgetting", "not", "only", "occurs", "in", "continual", "learning", "but", "also", "affects", "the", "traditional", "static", "training", ".", "neural", "networks", ",", "especially", "neural", "machine", "translation", "models", ",", "suffer", "from", "catastrophic", "forgetting", "even", "if", "they", "learn", "from", "a", "static", "training", "set", ".", "to", "be", "specific", ",", "the", "final", "model", "pays", 
"imbalanced", "attention", "to", "training", "samples", ",", "where", "recently", "exposed", "samples", "attract", "more", "attention", "than", "earlier", "samples", ".", "the", "underlying", "cause", "is", "that", "training", "samples", "do", "not", "get", "balanced", "training", "in", "each", "model", "update", ",", "so", "we", "name", "this", "problem", "imbalanced", "training", ".", "to", "alleviate", "this", "problem", ",", "we", "propose", "complementary", "online", "knowledge", "distillation", "(", "cokd", ")", ",", "which", "uses", "dynamically", "updated", "teacher", "models", "trained", "on", "specific", "data", "orders", "to", "iteratively", "provide", "complementary", "knowledge", "to", "the", "student", "model", ".", "experimental", "results", "on", "multiple", "machine", "translation", "tasks", "show", "that", "our", "method", "successfully", "alleviates", "the", "problem", "of", "imbalanced", "training", "and", "achieves", "substantial", "improvements", "over", "strong", "baseline", "systems", "."]}, {"venue": "ACL", "title": "Hard-Coded Gaussian Attention for Neural Machine Translation", "abstract": "Recent work has questioned the importance of the Transformer\u2019s multi-headed attention for achieving high translation quality. We push further in this direction by developing a \u201chard-coded\u201d attention variant without any learned parameters. Surprisingly, replacing all learned self-attention heads in the encoder and decoder with fixed, input-agnostic Gaussian distributions minimally impacts BLEU scores across four different language pairs. However, additionally, hard-coding cross attention (which connects the decoder to the encoder) significantly lowers BLEU, suggesting that it is more important than self-attention. Much of this BLEU drop can be recovered by adding just a single learned cross attention head to an otherwise hard-coded Transformer. 
Taken as a whole, our results offer insight into which components of the Transformer are actually important, which we hope will guide future work into the development of simpler and more efficient attention-based models.", "doc_id": "f417aff7cde3bdf74a32c6c033d4e963", "publication_year": 2020, "sentences": ["recent work has questioned the importance of the transformer \u2019 s multi - headed attention for achieving high translation quality .", "we push further in this direction by developing a \u201c hard - coded \u201d attention variant without any learned parameters .", "surprisingly , replacing all learned self - attention heads in the encoder and decoder with fixed , input - agnostic gaussian distributions minimally impacts bleu scores across four different language pairs .", "however , additionally , hard - coding cross attention ( which connects the decoder to the encoder ) significantly lowers bleu , suggesting that it is more important than self - attention .", "much of this bleu drop can be recovered by adding just a single learned cross attention head to an otherwise hard - coded transformer .", "taken as a whole , our results offer insight into which components of the transformer are actually important , which we hope will guide future work into the development of simpler and more efficient attention - based models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "transformer \u2019 s multi - headed attention", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["transformer", "\u2019", "s", "multi", "-", "headed", "attention"], "offsets": [8, 9, 10, 11, 12, 13, 14]}], "trigger": {"text": "questioned", "tokens": ["questioned"], "offsets": [3]}}, {"event_type": "FAC", "arguments": [{"text": "input - agnostic gaussian distributions", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["input", "-", "agnostic", "gaussian", "distributions"], "offsets": [59, 60, 61, 62, 63]}, {"text": "bleu scores", "nugget_type": "TAK", 
"argument_type": "Object", "tokens": ["bleu", "scores"], "offsets": [66, 67]}, {"text": "across four different language pairs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "four", "different", "language", "pairs"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "minimally impacts", "tokens": ["minimally", "impacts"], "offsets": [64, 65]}}, {"event_type": "FAC", "arguments": [{"text": "bleu", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["bleu"], "offsets": [94]}, {"text": "hard - coding cross attention", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["hard", "-", "coding", "cross", "attention"], "offsets": [78, 79, 80, 81, 82]}], "trigger": {"text": "significantly lowers", "tokens": ["significantly", "lowers"], "offsets": [92, 93]}}, {"event_type": "FIN", "arguments": [{"text": "more important", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "important"], "offsets": [100, 101]}], "trigger": {"text": "suggesting", "tokens": ["suggesting"], "offsets": [96]}}, {"event_type": "CMP", "arguments": [{"text": "more important", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "important"], "offsets": [100, 101]}, {"text": "self - attention", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["self", "-", "attention"], "offsets": [103, 104, 105]}], "trigger": {"text": "more important", "tokens": ["more", "important"], "offsets": [100, 101]}}, {"event_type": "MDS", "arguments": [{"text": "single learned cross attention head", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["single", "learned", "cross", "attention", "head"], "offsets": [119, 120, 121, 122, 123]}, {"text": "otherwise hard - coded transformer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["otherwise", "hard", "-", "coded", "transformer"], "offsets": [126, 127, 128, 129, 130]}, {"text": "recovered", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": 
["recovered"], "offsets": [114]}], "trigger": {"text": "adding", "tokens": ["adding"], "offsets": [116]}}, {"event_type": "PUR", "arguments": [{"text": "bleu drop", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["bleu", "drop"], "offsets": [110, 111]}], "trigger": {"text": "recovered", "tokens": ["recovered"], "offsets": [114]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "\u201c hard - coded \u201d attention variant", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["\u201c", "hard", "-", "coded", "\u201d", "attention", "variant"], "offsets": [30, 31, 32, 33, 34, 35, 36]}], "trigger": {"text": "developing", "tokens": ["developing"], "offsets": [28]}}], "document": ["recent", "work", "has", "questioned", "the", "importance", "of", "the", "transformer", "\u2019", "s", "multi", "-", "headed", "attention", "for", "achieving", "high", "translation", "quality", ".", "we", "push", "further", "in", "this", "direction", "by", "developing", "a", "\u201c", "hard", "-", "coded", "\u201d", "attention", "variant", "without", "any", "learned", "parameters", ".", "surprisingly", ",", "replacing", "all", "learned", "self", "-", "attention", "heads", "in", "the", "encoder", "and", "decoder", "with", "fixed", ",", "input", "-", "agnostic", "gaussian", "distributions", "minimally", "impacts", "bleu", "scores", "across", "four", "different", "language", "pairs", ".", "however", ",", "additionally", ",", "hard", "-", "coding", "cross", "attention", "(", "which", "connects", "the", "decoder", "to", "the", "encoder", ")", "significantly", "lowers", "bleu", ",", "suggesting", "that", "it", "is", "more", "important", "than", "self", "-", "attention", ".", "much", "of", "this", "bleu", "drop", "can", "be", "recovered", "by", "adding", "just", "a", "single", "learned", "cross", "attention", "head", "to", "an", "otherwise", "hard", "-", "coded", "transformer", ".", 
"taken", "as", "a", "whole", ",", "our", "results", "offer", "insight", "into", "which", "components", "of", "the", "transformer", "are", "actually", "important", ",", "which", "we", "hope", "will", "guide", "future", "work", "into", "the", "development", "of", "simpler", "and", "more", "efficient", "attention", "-", "based", "models", "."]}, {"venue": "ACL", "title": "Learning Functional Distributional Semantics with Visual Data", "abstract": "Functional Distributional Semantics is a recently proposed framework for learning distributional semantics that provides linguistic interpretability. It models the meaning of a word as a binary classifier rather than a numerical vector. In this work, we propose a method to train a Functional Distributional Semantics model with grounded visual data. We train it on the Visual Genome dataset, which is closer to the kind of data encountered in human language acquisition than a large text corpus. On four external evaluation datasets, our model outperforms previous work on learning semantics from Visual Genome.", "doc_id": "4f1541f9740bcc17b0d9d47f535ab728", "publication_year": 2022, "sentences": ["functional distributional semantics is a recently proposed framework for learning distributional semantics that provides linguistic interpretability .", "it models the meaning of a word as a binary classifier rather than a numerical vector .", "in this work , we propose a method to train a functional distributional semantics model with grounded visual data .", "we train it on the visual genome dataset , which is closer to the kind of data encountered in human language acquisition than a large text corpus .", "on four external evaluation datasets , our model outperforms previous work on learning semantics from visual genome ."], "events": [{"event_type": "ITT", "arguments": [{"text": "distributional semantics", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["distributional", "semantics"], "offsets": [10, 11]}], "trigger": 
{"text": "learning", "tokens": ["learning"], "offsets": [9]}}, {"event_type": "RWS", "arguments": [{"text": "functional distributional semantics", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["functional", "distributional", "semantics"], "offsets": [0, 1, 2]}, {"text": "meaning of a word", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["meaning", "of", "a", "word"], "offsets": [20, 21, 22, 23]}, {"text": "binary classifier", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["binary", "classifier"], "offsets": [26, 27]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [18]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [38]}, {"text": "functional distributional semantics model with grounded visual data", "nugget_type": "APP", "argument_type": "Content", "tokens": ["functional", "distributional", "semantics", "model", "with", "grounded", "visual", "data"], "offsets": [45, 46, 47, 48, 49, 50, 51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [54]}, {"text": "functional distributional semantics model with grounded visual data", "nugget_type": "APP", "argument_type": "Content", "tokens": ["functional", "distributional", "semantics", "model", "with", "grounded", "visual", "data"], "offsets": [45, 46, 47, 48, 49, 50, 51, 52]}, {"text": "visual genome dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["visual", "genome", "dataset"], "offsets": [59, 60, 61]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [55]}}, {"event_type": "CMP", "arguments": [{"text": "four external evaluation datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["four", "external", "evaluation", "datasets"], "offsets": [83, 84, 85, 
86]}, {"text": "functional distributional semantics model with grounded visual data", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["functional", "distributional", "semantics", "model", "with", "grounded", "visual", "data"], "offsets": [45, 46, 47, 48, 49, 50, 51, 52]}, {"text": "previous work on learning semantics from visual genome", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "work", "on", "learning", "semantics", "from", "visual", "genome"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [90]}}], "document": ["functional", "distributional", "semantics", "is", "a", "recently", "proposed", "framework", "for", "learning", "distributional", "semantics", "that", "provides", "linguistic", "interpretability", ".", "it", "models", "the", "meaning", "of", "a", "word", "as", "a", "binary", "classifier", "rather", "than", "a", "numerical", "vector", ".", "in", "this", "work", ",", "we", "propose", "a", "method", "to", "train", "a", "functional", "distributional", "semantics", "model", "with", "grounded", "visual", "data", ".", "we", "train", "it", "on", "the", "visual", "genome", "dataset", ",", "which", "is", "closer", "to", "the", "kind", "of", "data", "encountered", "in", "human", "language", "acquisition", "than", "a", "large", "text", "corpus", ".", "on", "four", "external", "evaluation", "datasets", ",", "our", "model", "outperforms", "previous", "work", "on", "learning", "semantics", "from", "visual", "genome", "."]}, {"venue": "ACL", "title": "Neural Pipeline for Zero-Shot Data-to-Text Generation", "abstract": "In data-to-text (D2T) generation, training on in-domain data leads to overfitting to the data representation and repeating training data noise. We examine how to avoid finetuning pretrained language models (PLMs) on D2T generation datasets while still taking advantage of surface realization capabilities of PLMs. 
Inspired by pipeline approaches, we propose to generate text by transforming single-item descriptions with a sequence of modules trained on general-domain text-based operations: ordering, aggregation, and paragraph compression. We train PLMs for performing these operations on a synthetic corpus WikiFluent which we build from English Wikipedia. Our experiments on two major triple-to-text datasets\u2014WebNLG and E2E\u2014show that our approach enables D2T generation from RDF triples in zero-shot settings.", "doc_id": "cf863b7553fb478ab0719d422f732e7b", "publication_year": 2022, "sentences": ["in data - to - text ( d2t ) generation , training on in - domain data leads to overfitting to the data representation and repeating training data noise .", "we examine how to avoid finetuning pretrained language models ( plms ) on d2t generation datasets while still taking advantage of surface realization capabilities of plms .", "inspired by pipeline approaches , we propose to generate text by transforming single - item descriptions with a sequence of modules trained on general - domain text - based operations : ordering , aggregation , and paragraph compression .", "we train plms for performing these operations on a synthetic corpus wikifluent which we build from english wikipedia .", "our experiments on two major triple - to - text datasets \u2014 webnlg and e2e \u2014 show that our approach enables d2t generation from rdf triples in zero - shot settings ."], "events": [{"event_type": "RWF", "arguments": [{"text": "overfitting", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overfitting"], "offsets": [19]}, {"text": "data representation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["data", "representation"], "offsets": [22, 23]}], "trigger": {"text": "overfitting", "tokens": ["overfitting"], "offsets": [19]}}, {"event_type": "ITT", "arguments": [{"text": "data - to - text ( d2t ) generation", "nugget_type": "TAK", "argument_type": "Target", 
"tokens": ["data", "-", "to", "-", "text", "(", "d2t", ")", "generation"], "offsets": [1, 2, 3, 4, 5, 6, 7, 8, 9]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [11]}}, {"event_type": "RWF", "arguments": [{"text": "training data noise", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["training", "data", "noise"], "offsets": [26, 27, 28]}, {"text": "in - domain data", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["in", "-", "domain", "data"], "offsets": [13, 14, 15, 16]}], "trigger": {"text": "repeating", "tokens": ["repeating"], "offsets": [25]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [30]}, {"text": "d2t generation datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["data", "-", "to", "-", "text", "generation", "datasets"], "offsets": [1, 2, 3, 4, 5, 44, 45]}, {"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pretrained", "language", "models"], "offsets": [36, 37, 38]}], "trigger": {"text": "avoid finetuning", "tokens": ["avoid", "finetuning"], "offsets": [34, 35]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [30]}, {"text": "advantage of surface realization capabilities of plms", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["advantage", "of", "surface", "realization", "capabilities", "of", "plms"], "offsets": [49, 50, 51, 52, 53, 54, 55]}], "trigger": {"text": "taking", "tokens": ["taking"], "offsets": [48]}}, {"event_type": "MDS", "arguments": [{"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [65]}, {"text": "single - item descriptions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["single", "-", "item", "descriptions"], "offsets": [69, 70, 71, 72]}, {"text": "modules 
trained on general - domain text - based operations", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["modules", "trained", "on", "general", "-", "domain", "text", "-", "based", "operations"], "offsets": [77, 78, 79, 80, 81, 82, 83, 84, 85, 86]}], "trigger": {"text": "transforming", "tokens": ["transforming"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "text", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["text"], "offsets": [66]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [96]}, {"text": "performing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["performing"], "offsets": [100]}, {"text": "synthetic corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetic", "corpus"], "offsets": [105, 106]}, {"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pretrained", "language", "models"], "offsets": [36, 37, 38]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [97]}}, {"event_type": "PUR", "arguments": [{"text": "general - domain text - based operations", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["general", "-", "domain", "text", "-", "based", "operations"], "offsets": [80, 81, 82, 83, 84, 85, 86]}], "trigger": {"text": "performing", "tokens": ["performing"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "in zero - shot settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "zero", "-", "shot", "settings"], "offsets": [141, 142, 143, 144, 145]}, {"text": "from rdf triples", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "rdf", "triples"], "offsets": [138, 139, 140]}, {"text": "d2t generation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["d2t", "generation"], 
"offsets": [136, 137]}, {"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pretrained", "language", "models"], "offsets": [36, 37, 38]}, {"text": "webnlg", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["webnlg"], "offsets": [127]}, {"text": "e2e", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["e2e"], "offsets": [129]}], "trigger": {"text": "enables", "tokens": ["enables"], "offsets": [135]}}], "document": ["in", "data", "-", "to", "-", "text", "(", "d2t", ")", "generation", ",", "training", "on", "in", "-", "domain", "data", "leads", "to", "overfitting", "to", "the", "data", "representation", "and", "repeating", "training", "data", "noise", ".", "we", "examine", "how", "to", "avoid", "finetuning", "pretrained", "language", "models", "(", "plms", ")", "on", "d2t", "generation", "datasets", "while", "still", "taking", "advantage", "of", "surface", "realization", "capabilities", "of", "plms", ".", "inspired", "by", "pipeline", "approaches", ",", "we", "propose", "to", "generate", "text", "by", "transforming", "single", "-", "item", "descriptions", "with", "a", "sequence", "of", "modules", "trained", "on", "general", "-", "domain", "text", "-", "based", "operations", ":", "ordering", ",", "aggregation", ",", "and", "paragraph", "compression", ".", "we", "train", "plms", "for", "performing", "these", "operations", "on", "a", "synthetic", "corpus", "wikifluent", "which", "we", "build", "from", "english", "wikipedia", ".", "our", "experiments", "on", "two", "major", "triple", "-", "to", "-", "text", "datasets", "\u2014", "webnlg", "and", "e2e", "\u2014", "show", "that", "our", "approach", "enables", "d2t", "generation", "from", "rdf", "triples", "in", "zero", "-", "shot", "settings", "."]}, {"venue": "ACL", "title": "DMix: Adaptive Distance-aware Interpolative Mixup", "abstract": "Interpolation-based regularisation methods such as Mixup, which generate virtual training samples, have proven to 
be effective for various tasks and modalities.We extend Mixup and propose DMix, an adaptive distance-aware interpolative Mixup that selects samples based on their diversity in the embedding space. DMix leverages the hyperbolic space as a similarity measure among input samples for a richer encoded representation.DMix achieves state-of-the-art results on sentence classification over existing data augmentation methods on 8 benchmark datasets across English, Arabic, Turkish, and Hindi languages while achieving benchmark F1 scores in 3 times less number of iterations.We probe the effectiveness of DMix in conjunction with various similarity measures and qualitatively analyze the different components.DMix being generalizable, can be applied to various tasks, models and modalities.", "doc_id": "6b293da19e5fa1299f6ec9e1c9722ed8", "publication_year": 2022, "sentences": ["interpolation - based regularisation methods such as mixup , which generate virtual training samples , have proven to be effective for various tasks and modalities .", "we extend mixup and propose dmix , an adaptive distance - aware interpolative mixup that selects samples based on their diversity in the embedding space .", "dmix leverages the hyperbolic space as a similarity measure among input samples for a richer encoded representation .", "dmix achieves state - of - the - art results on sentence classification over existing data augmentation methods on 8 benchmark datasets across english , arabic , turkish , and hindi languages while achieving benchmark f1 scores in 3 times less number of iterations .", "we probe the effectiveness of dmix in conjunction with various similarity measures and qualitatively analyze the different components .", "dmix being generalizable , can be applied to various tasks , models and modalities ."], "events": [{"event_type": "ITT", "arguments": [{"text": "interpolation - based regularisation methods", "nugget_type": "APP", "argument_type": "Target", "tokens": ["interpolation", 
"-", "based", "regularisation", "methods"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "proven", "tokens": ["proven"], "offsets": [16]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [26]}, {"text": "mixup", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixup"], "offsets": [28]}], "trigger": {"text": "extend", "tokens": ["extend"], "offsets": [27]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [26]}, {"text": "dmix", "nugget_type": "APP", "argument_type": "Content", "tokens": ["dmix"], "offsets": [31]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [30]}}, {"event_type": "MDS", "arguments": [{"text": "based on their diversity in the embedding space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "their", "diversity", "in", "the", "embedding", "space"], "offsets": [43, 44, 45, 46, 47, 48, 49, 50]}, {"text": "samples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["samples"], "offsets": [42]}], "trigger": {"text": "selects", "tokens": ["selects"], "offsets": [41]}}, {"event_type": "MDS", "arguments": [{"text": "hyperbolic space", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hyperbolic", "space"], "offsets": [55, 56]}, {"text": "similarity measure among input samples", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["similarity", "measure", "among", "input", "samples"], "offsets": [59, 60, 61, 62, 63]}, {"text": "richer encoded representation", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["richer", "encoded", "representation"], "offsets": [66, 67, 68]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [53]}}, {"event_type": "CMP", "arguments": [{"text": "dmix", "nugget_type": "APP", "argument_type": "Arg1", "tokens": 
["dmix"], "offsets": [70]}, {"text": "existing data augmentation methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "data", "augmentation", "methods"], "offsets": [84, 85, 86, 87]}, {"text": "8 benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["8", "benchmark", "datasets"], "offsets": [89, 90, 91]}, {"text": "across english , arabic , turkish , and hindi languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "english", ",", "arabic", ",", "turkish", ",", "and", "hindi", "languages"], "offsets": [92, 93, 94, 95, 96, 97, 98, 99, 100, 101]}, {"text": "state - of - the - art results on sentence classification", "nugget_type": "STR", "argument_type": "Result", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results", "on", "sentence", "classification"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [71]}}, {"event_type": "FAC", "arguments": [{"text": "dmix", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["dmix"], "offsets": [70]}, {"text": "benchmark f1 scores", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["benchmark", "f1", "scores"], "offsets": [104, 105, 106]}, {"text": "in 3 times less number of iterations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "3", "times", "less", "number", "of", "iterations"], "offsets": [107, 108, 109, 110, 111, 112, 113]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [115]}, {"text": "effectiveness of dmix", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effectiveness", "of", "dmix"], "offsets": [118, 119, 120]}, {"text": "in conjunction with various similarity measures", "nugget_type": "LIM", "argument_type": "Condition", 
"tokens": ["in", "conjunction", "with", "various", "similarity", "measures"], "offsets": [121, 122, 123, 124, 125, 126]}], "trigger": {"text": "probe", "tokens": ["probe"], "offsets": [116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [115]}, {"text": "different components", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["different", "components"], "offsets": [131, 132]}], "trigger": {"text": "qualitatively analyze", "tokens": ["qualitatively", "analyze"], "offsets": [128, 129]}}, {"event_type": "FAC", "arguments": [{"text": "dmix", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["dmix"], "offsets": [134]}, {"text": "various tasks , models and modalities", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["various", "tasks", ",", "models", "and", "modalities"], "offsets": [142, 143, 144, 145, 146, 147]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [140]}}], "document": ["interpolation", "-", "based", "regularisation", "methods", "such", "as", "mixup", ",", "which", "generate", "virtual", "training", "samples", ",", "have", "proven", "to", "be", "effective", "for", "various", "tasks", "and", "modalities", ".", "we", "extend", "mixup", "and", "propose", "dmix", ",", "an", "adaptive", "distance", "-", "aware", "interpolative", "mixup", "that", "selects", "samples", "based", "on", "their", "diversity", "in", "the", "embedding", "space", ".", "dmix", "leverages", "the", "hyperbolic", "space", "as", "a", "similarity", "measure", "among", "input", "samples", "for", "a", "richer", "encoded", "representation", ".", "dmix", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "on", "sentence", "classification", "over", "existing", "data", "augmentation", "methods", "on", "8", "benchmark", "datasets", "across", "english", ",", "arabic", ",", "turkish", ",", "and", "hindi", "languages", "while", "achieving", 
"benchmark", "f1", "scores", "in", "3", "times", "less", "number", "of", "iterations", ".", "we", "probe", "the", "effectiveness", "of", "dmix", "in", "conjunction", "with", "various", "similarity", "measures", "and", "qualitatively", "analyze", "the", "different", "components", ".", "dmix", "being", "generalizable", ",", "can", "be", "applied", "to", "various", "tasks", ",", "models", "and", "modalities", "."]}, {"venue": "ACL", "title": "Self-Attention Architectures for Answer-Agnostic Neural Question Generation", "abstract": "Neural architectures based on self-attention, such as Transformers, recently attracted interest from the research community, and obtained significant improvements over the state of the art in several tasks. We explore how Transformers can be adapted to the task of Neural Question Generation without constraining the model to focus on a specific answer passage. We study the effect of several strategies to deal with out-of-vocabulary words such as copy mechanisms, placeholders, and contextual word embeddings. 
We report improvements obtained over the state-of-the-art on the SQuAD dataset according to automated metrics (BLEU, ROUGE), as well as qualitative human assessments of the system outputs.", "doc_id": "3f64b2586b1aa53bcf368473d63e20fa", "publication_year": 2019, "sentences": ["neural architectures based on self - attention , such as transformers , recently attracted interest from the research community , and obtained significant improvements over the state of the art in several tasks .", "we explore how transformers can be adapted to the task of neural question generation without constraining the model to focus on a specific answer passage .", "we study the effect of several strategies to deal with out - of - vocabulary words such as copy mechanisms , placeholders , and contextual word embeddings .", "we report improvements obtained over the state - of - the - art on the squad dataset according to automated metrics ( bleu , rouge ) , as well as qualitative human assessments of the system outputs ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural question generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "question", "generation"], "offsets": [45, 46, 47]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [35]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "effect of several strategies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effect", "of", "several", "strategies"], "offsets": [63, 64, 65, 66]}, {"text": "deal", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["deal"], "offsets": [68]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [61]}}, {"event_type": "PUR", "arguments": [{"text": "out - of - vocabulary words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["out", "-", "of", "-", "vocabulary", "words"], "offsets": [70, 71, 72, 73, 74, 
75]}], "trigger": {"text": "deal", "tokens": ["deal"], "offsets": [68]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [88]}, {"text": "obtained", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["obtained"], "offsets": [91]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [89]}}, {"event_type": "CMP", "arguments": [{"text": "as well as qualitative human assessments of the system outputs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "well", "as", "qualitative", "human", "assessments", "of", "the", "system", "outputs"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [94, 95, 96, 97, 98, 99, 100]}, {"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [90]}, {"text": "squad dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["squad", "dataset"], "offsets": [103, 104]}, {"text": "bleu", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu"], "offsets": [110]}, {"text": "rouge", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["rouge"], "offsets": [112]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [91]}}], "document": ["neural", "architectures", "based", "on", "self", "-", "attention", ",", "such", "as", "transformers", ",", "recently", "attracted", "interest", "from", "the", "research", "community", ",", "and", "obtained", "significant", "improvements", "over", "the", "state", "of", "the", "art", "in", "several", "tasks", ".", "we", "explore", "how", "transformers", "can", "be", "adapted", "to", "the", "task", "of", "neural", "question", "generation", "without", "constraining", "the", "model", "to", "focus", "on", "a", "specific", "answer", 
"passage", ".", "we", "study", "the", "effect", "of", "several", "strategies", "to", "deal", "with", "out", "-", "of", "-", "vocabulary", "words", "such", "as", "copy", "mechanisms", ",", "placeholders", ",", "and", "contextual", "word", "embeddings", ".", "we", "report", "improvements", "obtained", "over", "the", "state", "-", "of", "-", "the", "-", "art", "on", "the", "squad", "dataset", "according", "to", "automated", "metrics", "(", "bleu", ",", "rouge", ")", ",", "as", "well", "as", "qualitative", "human", "assessments", "of", "the", "system", "outputs", "."]}, {"venue": "ACL", "title": "LAGr: Label Aligned Graphs for Better Systematic Generalization in Semantic Parsing", "abstract": "Semantic parsing is the task of producing structured meaning representations for natural language sentences. Recent research has pointed out that the commonly-used sequence-to-sequence (seq2seq) semantic parsers struggle to generalize systematically, i.e. to handle examples that require recombining known knowledge in novel settings. In this work, we show that better systematic generalization can be achieved by producing the meaning representation directly as a graph and not as a sequence. To this end we propose LAGr (Label Aligned Graphs), a general framework to produce semantic parses by independently predicting node and edge labels for a complete multi-layer input-aligned graph. The strongly-supervised LAGr algorithm requires aligned graphs as inputs, whereas weakly-supervised LAGr infers alignments for originally unaligned target graphs using approximate maximum-a-posteriori inference. 
Experiments demonstrate that LAGr achieves significant improvements in systematic generalization upon the baseline seq2seq parsers in both strongly- and weakly-supervised settings.", "doc_id": "f9e76523042ac5fcbfb4ee2cad30f485", "publication_year": 2022, "sentences": ["semantic parsing is the task of producing structured meaning representations for natural language sentences .", "recent research has pointed out that the commonly - used sequence - to - sequence ( seq2seq ) semantic parsers struggle to generalize systematically , i . e . to handle examples that require recombining known knowledge in novel settings .", "in this work , we show that better systematic generalization can be achieved by producing the meaning representation directly as a graph and not as a sequence .", "to this end we propose lagr ( label aligned graphs ) , a general framework to produce semantic parses by independently predicting node and edge labels for a complete multi - layer input - aligned graph .", "the strongly - supervised lagr algorithm requires aligned graphs as inputs , whereas weakly - supervised lagr infers alignments for originally unaligned target graphs using approximate maximum - a - posteriori inference .", "experiments demonstrate that lagr achieves significant improvements in systematic generalization upon the baseline seq2seq parsers in both strongly - and weakly - supervised settings ."], "events": [{"event_type": "ITT", "arguments": [{"text": "semantic parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "parsing"], "offsets": [0, 1]}], "trigger": {"text": "task of producing structured meaning representations", "tokens": ["task", "of", "producing", "structured", "meaning", "representations"], "offsets": [4, 5, 6, 7, 8, 9]}}, {"event_type": "RWF", "arguments": [{"text": "commonly - used sequence - to - sequence ( seq2seq ) semantic parsers", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["commonly", "-", "used", "sequence", 
"-", "to", "-", "sequence", "semantic", "parsers"], "offsets": [22, 23, 24, 25, 26, 27, 28, 29, 33, 34]}, {"text": "struggle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["struggle"], "offsets": [35]}], "trigger": {"text": "struggle", "tokens": ["struggle"], "offsets": [35]}}, {"event_type": "PUR", "arguments": [{"text": "examples", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["examples"], "offsets": [46]}, {"text": "that require recombining known knowledge in novel settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["that", "require", "recombining", "known", "knowledge", "in", "novel", "settings"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54]}], "trigger": {"text": "handle", "tokens": ["handle"], "offsets": [45]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [60]}, {"text": "achieved", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieved"], "offsets": [68]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [61]}}, {"event_type": "FAC", "arguments": [{"text": "by producing the meaning representation directly as a graph", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "producing", "the", "meaning", "representation", "directly", "as", "a", "graph"], "offsets": [69, 70, 71, 72, 73, 74, 75, 76, 77]}, {"text": "better systematic generalization", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "systematic", "generalization"], "offsets": [63, 64, 65]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [68]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [87]}, {"text": "lagr", "nugget_type": "APP", "argument_type": "Content", "tokens": ["label", "aligned", "graphs"], "offsets": [91, 92, 93]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [88]}}, 
{"event_type": "MDS", "arguments": [{"text": "semantic parses", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "parses"], "offsets": [101, 102]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [100]}}, {"event_type": "MDS", "arguments": [{"text": "node", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["node"], "offsets": [106]}, {"text": "edge labels", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["edge", "labels"], "offsets": [108, 109]}, {"text": "complete multi - layer input - aligned graph", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["complete", "multi", "-", "layer", "input", "-", "aligned", "graph"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119]}], "trigger": {"text": "predicting", "tokens": ["predicting"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "aligned graphs", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["aligned", "graphs"], "offsets": [128, 129]}, {"text": "inputs", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["inputs"], "offsets": [131]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [127]}}, {"event_type": "WKS", "arguments": [{"text": "approximate maximum - a - posteriori inference", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["approximate", "maximum", "-", "a", "-", "posteriori", "inference"], "offsets": [146, 147, 148, 149, 150, 151, 152]}, {"text": "infers", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["infers"], "offsets": [138]}, {"text": "originally unaligned target graphs", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["originally", "unaligned", "target", "graphs"], "offsets": [141, 142, 143, 144]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [145]}}, {"event_type": "PUR", "arguments": [{"text": "alignments", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["alignments"], 
"offsets": [139]}], "trigger": {"text": "infers", "tokens": ["infers"], "offsets": [138]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [158]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [155]}}, {"event_type": "CMP", "arguments": [{"text": "lagr", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["label", "aligned", "graphs"], "offsets": [91, 92, 93]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [159]}, {"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [160]}, {"text": "systematic generalization", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["systematic", "generalization"], "offsets": [162, 163]}, {"text": "baseline seq2seq parsers", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["baseline", "seq2seq", "parsers"], "offsets": [166, 167, 168]}, {"text": "strongly - and weakly - supervised settings", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["strongly", "-", "and", "weakly", "-", "supervised", "settings"], "offsets": [171, 172, 173, 174, 175, 176, 177]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [158]}}], "document": ["semantic", "parsing", "is", "the", "task", "of", "producing", "structured", "meaning", "representations", "for", "natural", "language", "sentences", ".", "recent", "research", "has", "pointed", "out", "that", "the", "commonly", "-", "used", "sequence", "-", "to", "-", "sequence", "(", "seq2seq", ")", "semantic", "parsers", "struggle", "to", "generalize", "systematically", ",", "i", ".", "e", ".", "to", "handle", "examples", "that", "require", "recombining", "known", "knowledge", "in", "novel", "settings", ".", "in", "this", "work", ",", "we", "show", "that", "better", "systematic", "generalization", "can", "be", 
"achieved", "by", "producing", "the", "meaning", "representation", "directly", "as", "a", "graph", "and", "not", "as", "a", "sequence", ".", "to", "this", "end", "we", "propose", "lagr", "(", "label", "aligned", "graphs", ")", ",", "a", "general", "framework", "to", "produce", "semantic", "parses", "by", "independently", "predicting", "node", "and", "edge", "labels", "for", "a", "complete", "multi", "-", "layer", "input", "-", "aligned", "graph", ".", "the", "strongly", "-", "supervised", "lagr", "algorithm", "requires", "aligned", "graphs", "as", "inputs", ",", "whereas", "weakly", "-", "supervised", "lagr", "infers", "alignments", "for", "originally", "unaligned", "target", "graphs", "using", "approximate", "maximum", "-", "a", "-", "posteriori", "inference", ".", "experiments", "demonstrate", "that", "lagr", "achieves", "significant", "improvements", "in", "systematic", "generalization", "upon", "the", "baseline", "seq2seq", "parsers", "in", "both", "strongly", "-", "and", "weakly", "-", "supervised", "settings", "."]}, {"venue": "ACL", "title": "On the Compositionality Prediction of Noun Phrases using Poincar\u00e9 Embeddings", "abstract": "The compositionality degree of multiword expressions indicates to what extent the meaning of a phrase can be derived from the meaning of its constituents and their grammatical relations. Prediction of (non)-compositionality is a task that has been frequently addressed with distributional semantic models. We introduce a novel technique to blend hierarchical information with distributional information for predicting compositionality. In particular, we use hypernymy information of the multiword and its constituents encoded in the form of the recently introduced Poincar\u00e9 embeddings in addition to the distributional information to detect compositionality for noun phrases. 
Using a weighted average of the distributional similarity and a Poincar\u00e9 similarity function, we obtain consistent and substantial, statistically significant improvement across three gold standard datasets over state-of-the-art models based on distributional information only. Unlike traditional approaches that solely use an unsupervised setting, we have also framed the problem as a supervised task, obtaining comparable improvements. Further, we publicly release our Poincar\u00e9 embeddings, which are trained on the output of handcrafted lexical-syntactic patterns on a large corpus.", "doc_id": "c3287dfcc1e03c3a1e785a16b9c74281", "publication_year": 2019, "sentences": ["the compositionality degree of multiword expressions indicates to what extent the meaning of a phrase can be derived from the meaning of its constituents and their grammatical relations .", "prediction of ( non ) - compositionality is a task that has been frequently addressed with distributional semantic models .", "we introduce a novel technique to blend hierarchical information with distributional information for predicting compositionality .", "in particular , we use hypernymy information of the multiword and its constituents encoded in the form of the recently introduced poincare embeddings in addition to the distributional information to detect compositionality for noun phrases .", "using a weighted average of the distributional similarity and a poincare similarity function , we obtain consistent and substantial , statistically significant improvement across three gold standard datasets over state - of - the - art models based on distributional information only .", "unlike traditional approaches that solely use an unsupervised setting , we have also framed the problem as a supervised task , obtaining comparable improvements .", "further , we publicly release our poincare embeddings , which are trained on the output of handcrafted lexical - syntactic patterns on a large corpus ."], "events": 
[{"event_type": "ITT", "arguments": [{"text": "prediction of ( non ) - compositionality", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["prediction", "of", "(", "non", ")", "-", "compositionality"], "offsets": [29, 30, 31, 32, 33, 34, 35]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [38]}}, {"event_type": "PUR", "arguments": [{"text": "hierarchical information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["hierarchical", "information"], "offsets": [56, 57]}, {"text": "distributional information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["distributional", "information"], "offsets": [59, 60]}], "trigger": {"text": "blend", "tokens": ["blend"], "offsets": [55]}}, {"event_type": "MDS", "arguments": [{"text": "distributional information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["distributional", "information"], "offsets": [92, 93]}, {"text": "hypernymy information of the multiword", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["hypernymy", "information", "of", "the", "multiword"], "offsets": [70, 71, 72, 73, 74]}, {"text": "its constituents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["its", "constituents"], "offsets": [76, 77]}, {"text": "detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["detect"], "offsets": [95]}, {"text": "noun phrases", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["noun", "phrases"], "offsets": [98, 99]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [69]}}, {"event_type": "CMP", "arguments": [{"text": "using a weighted average of the distributional similarity and a poincare similarity function", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "a", "weighted", "average", "of", "the", "distributional", "similarity", "and", "a", "poincare", "similarity", "function"], "offsets": [101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113]}, 
{"text": "three gold standard datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "gold", "standard", "datasets"], "offsets": [125, 126, 127, 128]}, {"text": "consistent", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["consistent"], "offsets": [117]}, {"text": "substantial", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantial"], "offsets": [119]}, {"text": "statistically significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["statistically", "significant"], "offsets": [121, 122]}, {"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [123]}, {"text": "state - of - the - art models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [130, 131, 132, 133, 134, 135, 136, 137]}, {"text": "based on distributional information only", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "distributional", "information", "only"], "offsets": [138, 139, 140, 141, 142]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [154]}, {"text": "supervised task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["supervised", "task"], "offsets": [162, 163]}], "trigger": {"text": "framed", "tokens": ["framed"], "offsets": [157]}}, {"event_type": "FAC", "arguments": [{"text": "comparable improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["comparable", "improvements"], "offsets": [166, 167]}], "trigger": {"text": "obtaining", "tokens": ["obtaining"], "offsets": [165]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [171]}, {"text": "poincare embeddings", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["poincare", "embeddings"], "offsets": [175, 176]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [173]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [49]}, {"text": "technique", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["technique"], "offsets": [53]}, {"text": "blend", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["blend"], "offsets": [55]}, {"text": "predicting compositionality", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["predicting", "compositionality"], "offsets": [62, 63]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [50]}}, {"event_type": "PUR", "arguments": [{"text": "compositionality", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["compositionality"], "offsets": [96]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [95]}}], "document": ["the", "compositionality", "degree", "of", "multiword", "expressions", "indicates", "to", "what", "extent", "the", "meaning", "of", "a", "phrase", "can", "be", "derived", "from", "the", "meaning", "of", "its", "constituents", "and", "their", "grammatical", "relations", ".", "prediction", "of", "(", "non", ")", "-", "compositionality", "is", "a", "task", "that", "has", "been", "frequently", "addressed", "with", "distributional", "semantic", "models", ".", "we", "introduce", "a", "novel", "technique", "to", "blend", "hierarchical", "information", "with", "distributional", "information", "for", "predicting", "compositionality", ".", "in", "particular", ",", "we", "use", "hypernymy", "information", "of", "the", "multiword", "and", "its", "constituents", "encoded", "in", "the", "form", "of", "the", "recently", "introduced", "poincare", "embeddings", "in", "addition", "to", "the", "distributional", "information", "to", "detect", "compositionality", "for", "noun", "phrases", ".", 
"using", "a", "weighted", "average", "of", "the", "distributional", "similarity", "and", "a", "poincare", "similarity", "function", ",", "we", "obtain", "consistent", "and", "substantial", ",", "statistically", "significant", "improvement", "across", "three", "gold", "standard", "datasets", "over", "state", "-", "of", "-", "the", "-", "art", "models", "based", "on", "distributional", "information", "only", ".", "unlike", "traditional", "approaches", "that", "solely", "use", "an", "unsupervised", "setting", ",", "we", "have", "also", "framed", "the", "problem", "as", "a", "supervised", "task", ",", "obtaining", "comparable", "improvements", ".", "further", ",", "we", "publicly", "release", "our", "poincare", "embeddings", ",", "which", "are", "trained", "on", "the", "output", "of", "handcrafted", "lexical", "-", "syntactic", "patterns", "on", "a", "large", "corpus", "."]}, {"venue": "ACL", "title": "Improving the Robustness of Question Answering Systems to Question Paraphrasing", "abstract": "Despite the advancement of question answering (QA) systems and rapid improvements on held-out test sets, their generalizability is a topic of concern. We explore the robustness of QA models to question paraphrasing by creating two test sets consisting of paraphrased SQuAD questions. Paraphrased questions from the first test set are very similar to the original questions designed to test QA models\u2019 over-sensitivity, while questions from the second test set are paraphrased using context words near an incorrect answer candidate in an attempt to confuse QA models. We show that both paraphrased test sets lead to significant decrease in performance on multiple state-of-the-art QA models. 
Using a neural paraphrasing model trained to generate multiple paraphrased questions for a given source question and a set of paraphrase suggestions, we propose a data augmentation approach that requires no human intervention to re-train the models for improved robustness to question paraphrasing.", "doc_id": "cc90cf8a89b308caf0758defbb3df0d2", "publication_year": 2019, "sentences": ["despite the advancement of question answering ( qa ) systems and rapid improvements on held - out test sets , their generalizability is a topic of concern .", "we explore the robustness of qa models to question paraphrasing by creating two test sets consisting of paraphrased squad questions .", "paraphrased questions from the first test set are very similar to the original questions designed to test qa models \u2019 over - sensitivity , while questions from the second test set are paraphrased using context words near an incorrect answer candidate in an attempt to confuse qa models .", "we show that both paraphrased test sets lead to significant decrease in performance on multiple state - of - the - art qa models .", "using a neural paraphrasing model trained to generate multiple paraphrased questions for a given source question and a set of paraphrase suggestions , we propose a data augmentation approach that requires no human intervention to re - train the models for improved robustness to question paraphrasing ."], "events": [{"event_type": "RWF", "arguments": [{"text": "concern", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["concern"], "offsets": [26]}], "trigger": {"text": "concern", "tokens": ["concern"], "offsets": [26]}}, {"event_type": "MDS", "arguments": [{"text": "two test sets", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["two", "test", "sets"], "offsets": [40, 41, 42]}, {"text": "explore", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["explore"], "offsets": [29]}], "trigger": {"text": "creating", "tokens": ["creating"], 
"offsets": [39]}}, {"event_type": "PUR", "arguments": [{"text": "robustness of qa models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["robustness", "of", "question", "answering", "models"], "offsets": [31, 32, 4, 5, 34]}, {"text": "to question paraphrasing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "question", "paraphrasing"], "offsets": [35, 36, 37]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [29]}}, {"event_type": "FIN", "arguments": [{"text": "significant decrease", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significant", "decrease"], "offsets": [107, 108]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "two test sets", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["two", "test", "sets"], "offsets": [40, 41, 42]}, {"text": "performance on multiple state - of - the - art qa models", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "on", "multiple", "state", "-", "of", "-", "the", "-", "art", "question", "answering", "models"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 4, 5, 121]}], "trigger": {"text": "significant decrease", "tokens": ["significant", "decrease"], "offsets": [107, 108]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [146]}, {"text": "data augmentation approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "augmentation", "approach"], "offsets": [149, 150, 151]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [147]}}, {"event_type": "MDS", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["models"], "offsets": [162]}, {"text": "improved", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improved"], "offsets": [164]}, {"text": "neural paraphrasing 
model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["neural", "paraphrasing", "model"], "offsets": [125, 126, 127]}, {"text": "set of paraphrase suggestions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["set", "of", "paraphrase", "suggestions"], "offsets": [141, 142, 143, 144]}], "trigger": {"text": "re - train", "tokens": ["re", "-", "train"], "offsets": [158, 159, 160]}}, {"event_type": "PUR", "arguments": [{"text": "robustness to question paraphrasing", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["robustness", "to", "question", "paraphrasing"], "offsets": [165, 166, 167, 168]}], "trigger": {"text": "improved", "tokens": ["improved"], "offsets": [164]}}], "document": ["despite", "the", "advancement", "of", "question", "answering", "(", "qa", ")", "systems", "and", "rapid", "improvements", "on", "held", "-", "out", "test", "sets", ",", "their", "generalizability", "is", "a", "topic", "of", "concern", ".", "we", "explore", "the", "robustness", "of", "qa", "models", "to", "question", "paraphrasing", "by", "creating", "two", "test", "sets", "consisting", "of", "paraphrased", "squad", "questions", ".", "paraphrased", "questions", "from", "the", "first", "test", "set", "are", "very", "similar", "to", "the", "original", "questions", "designed", "to", "test", "qa", "models", "\u2019", "over", "-", "sensitivity", ",", "while", "questions", "from", "the", "second", "test", "set", "are", "paraphrased", "using", "context", "words", "near", "an", "incorrect", "answer", "candidate", "in", "an", "attempt", "to", "confuse", "qa", "models", ".", "we", "show", "that", "both", "paraphrased", "test", "sets", "lead", "to", "significant", "decrease", "in", "performance", "on", "multiple", "state", "-", "of", "-", "the", "-", "art", "qa", "models", ".", "using", "a", "neural", "paraphrasing", "model", "trained", "to", "generate", "multiple", "paraphrased", "questions", "for", "a", "given", "source", "question", "and", "a", 
"set", "of", "paraphrase", "suggestions", ",", "we", "propose", "a", "data", "augmentation", "approach", "that", "requires", "no", "human", "intervention", "to", "re", "-", "train", "the", "models", "for", "improved", "robustness", "to", "question", "paraphrasing", "."]}, {"venue": "ACL", "title": "A Span-Based Model for Joint Overlapped and Discontinuous Named Entity Recognition", "abstract": "Research on overlapped and discontinuous named entity recognition (NER) has received increasing attention. The majority of previous work focuses on either overlapped or discontinuous entities. In this paper, we propose a novel span-based model that can recognize both overlapped and discontinuous entities jointly. The model includes two major steps. First, entity fragments are recognized by traversing over all possible text spans, thus, overlapped entities can be recognized. Second, we perform relation classification to judge whether a given pair of entity fragments to be overlapping or succession. In this way, we can recognize not only discontinuous entities, and meanwhile doubly check the overlapped entities. As a whole, our model can be regarded as a relation extraction paradigm essentially. 
Experimental results on multiple benchmark datasets (i.e., CLEF, GENIA and ACE05) show that our model is highly competitive for overlapped and discontinuous NER.", "doc_id": "06e4f17e44383b8dc773311725e0e29b", "publication_year": 2021, "sentences": ["research on overlapped and discontinuous named entity recognition ( ner ) has received increasing attention .", "the majority of previous work focuses on either overlapped or discontinuous entities .", "in this paper , we propose a novel span - based model that can recognize both overlapped and discontinuous entities jointly .", "the model includes two major steps .", "first , entity fragments are recognized by traversing over all possible text spans , thus , overlapped entities can be recognized .", "second , we perform relation classification to judge whether a given pair of entity fragments to be overlapping or succession .", "in this way , we can recognize not only discontinuous entities , and meanwhile doubly check the overlapped entities .", "as a whole , our model can be regarded as a relation extraction paradigm essentially .", "experimental results on multiple benchmark datasets ( i . e . 
, clef , genia and ace05 ) show that our model is highly competitive for overlapped and discontinuous ner ."], "events": [{"event_type": "ITT", "arguments": [{"text": "overlapped and discontinuous named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["overlapped", "and", "discontinuous", "named", "entity", "recognition"], "offsets": [2, 3, 4, 5, 6, 7]}], "trigger": {"text": "received", "tokens": ["received"], "offsets": [12]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [33]}, {"text": "span - based model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["span", "-", "based", "model"], "offsets": [37, 38, 39, 40]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "all possible text spans", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["all", "possible", "text", "spans"], "offsets": [67, 68, 69, 70]}, {"text": "entity fragments", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "fragments"], "offsets": [60, 61]}, {"text": "overlapped entities", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["overlapped", "entities"], "offsets": [74, 75]}], "trigger": {"text": "recognized", "tokens": ["recognized"], "offsets": [63]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [82]}, {"text": "relation classification", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["relation", "classification"], "offsets": [84, 85]}, {"text": "judge", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["judge"], "offsets": [87]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "whether a given pair of entity fragments to be overlapping or succession", "nugget_type": 
"TAK", "argument_type": "Aim", "tokens": ["whether", "a", "given", "pair", "of", "entity", "fragments", "to", "be", "overlapping", "or", "succession"], "offsets": [88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]}], "trigger": {"text": "judge", "tokens": ["judge"], "offsets": [87]}}, {"event_type": "FAC", "arguments": [{"text": "multiple benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiple", "benchmark", "datasets"], "offsets": [140, 141, 142]}, {"text": "span - based model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["span", "-", "based", "model"], "offsets": [37, 38, 39, 40]}, {"text": "overlapped and discontinuous ner", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["overlapped", "and", "discontinuous", "ner"], "offsets": [163, 164, 165, 166]}, {"text": "highly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["highly"], "offsets": [160]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [161]}}], "document": ["research", "on", "overlapped", "and", "discontinuous", "named", "entity", "recognition", "(", "ner", ")", "has", "received", "increasing", "attention", ".", "the", "majority", "of", "previous", "work", "focuses", "on", "either", "overlapped", "or", "discontinuous", "entities", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "span", "-", "based", "model", "that", "can", "recognize", "both", "overlapped", "and", "discontinuous", "entities", "jointly", ".", "the", "model", "includes", "two", "major", "steps", ".", "first", ",", "entity", "fragments", "are", "recognized", "by", "traversing", "over", "all", "possible", "text", "spans", ",", "thus", ",", "overlapped", "entities", "can", "be", "recognized", ".", "second", ",", "we", "perform", "relation", "classification", "to", "judge", "whether", "a", "given", "pair", "of", "entity", "fragments", "to", "be", "overlapping", "or", "succession", ".", "in", "this", "way", ",", "we", "can", 
"recognize", "not", "only", "discontinuous", "entities", ",", "and", "meanwhile", "doubly", "check", "the", "overlapped", "entities", ".", "as", "a", "whole", ",", "our", "model", "can", "be", "regarded", "as", "a", "relation", "extraction", "paradigm", "essentially", ".", "experimental", "results", "on", "multiple", "benchmark", "datasets", "(", "i", ".", "e", ".", ",", "clef", ",", "genia", "and", "ace05", ")", "show", "that", "our", "model", "is", "highly", "competitive", "for", "overlapped", "and", "discontinuous", "ner", "."]}, {"venue": "ACL", "title": "Unsupervised multiple-choice question generation for out-of-domain Q&A fine-tuning", "abstract": "Pre-trained models have shown very good performances on a number of question answering benchmarks especially when fine-tuned on multiple question answering datasets at once. In this work, we propose an approach for generating a fine-tuning dataset thanks to a rule-based algorithm that generates questions and answers from unannotated sentences. We show that the state-of-the-art model UnifiedQA can greatly benefit from such a system on a multiple-choice benchmark about physics, biology and chemistry it has never been trained on. 
We further show that improved performances may be obtained by selecting the most challenging distractors (wrong answers), with a dedicated ranker based on a pretrained RoBERTa model.", "doc_id": "163cbfc322e122f4638cf54f7ec41605", "publication_year": 2022, "sentences": ["pre - trained models have shown very good performances on a number of question answering benchmarks especially when fine - tuned on multiple question answering datasets at once .", "in this work , we propose an approach for generating a fine - tuning dataset thanks to a rule - based algorithm that generates questions and answers from unannotated sentences .", "we show that the state - of - the - art model unifiedqa can greatly benefit from such a system on a multiple - choice benchmark about physics , biology and chemistry it has never been trained on .", "we further show that improved performances may be obtained by selecting the most challenging distractors ( wrong answers ) , with a dedicated ranker based on a pretrained roberta model ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [33]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [36]}, {"text": "generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [38]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [34]}}, {"event_type": "PUR", "arguments": [{"text": "fine - tuning dataset", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["fine", "-", "tuning", "dataset"], "offsets": [40, 41, 42, 43]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": 
[38]}}, {"event_type": "MDS", "arguments": [{"text": "rule - based algorithm", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["rule", "-", "based", "algorithm"], "offsets": [47, 48, 49, 50]}, {"text": "questions and answers", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["questions", "and", "answers"], "offsets": [53, 54, 55]}, {"text": "unannotated sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["unannotated", "sentences"], "offsets": [57, 58]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [52]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [60]}, {"text": "benefit", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["benefit"], "offsets": [75]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [61]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art model unifiedqa", "nugget_type": "APP", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "model", "unifiedqa"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71, 72]}, {"text": "multiple - choice benchmark", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multiple", "-", "choice", "benchmark"], "offsets": [82, 83, 84, 85]}], "trigger": {"text": "benefit", "tokens": ["benefit"], "offsets": [75]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [99]}, {"text": "obtained", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtained"], "offsets": [107]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [101]}}, {"event_type": "FAC", "arguments": [{"text": "improved performances", "nugget_type": "STR", "argument_type": "Object", "tokens": ["improved", "performances"], "offsets": [103, 104]}, {"text": "selecting the most challenging distractors", 
"nugget_type": "TAK", "argument_type": "Subject", "tokens": ["selecting", "the", "most", "challenging", "distractors"], "offsets": [109, 110, 111, 112, 113]}, {"text": "with a dedicated ranker based on a pretrained roberta model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "a", "dedicated", "ranker", "based", "on", "a", "pretrained", "roberta", "model"], "offsets": [119, 120, 121, 122, 123, 124, 125, 126, 127, 128]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [107]}}], "document": ["pre", "-", "trained", "models", "have", "shown", "very", "good", "performances", "on", "a", "number", "of", "question", "answering", "benchmarks", "especially", "when", "fine", "-", "tuned", "on", "multiple", "question", "answering", "datasets", "at", "once", ".", "in", "this", "work", ",", "we", "propose", "an", "approach", "for", "generating", "a", "fine", "-", "tuning", "dataset", "thanks", "to", "a", "rule", "-", "based", "algorithm", "that", "generates", "questions", "and", "answers", "from", "unannotated", "sentences", ".", "we", "show", "that", "the", "state", "-", "of", "-", "the", "-", "art", "model", "unifiedqa", "can", "greatly", "benefit", "from", "such", "a", "system", "on", "a", "multiple", "-", "choice", "benchmark", "about", "physics", ",", "biology", "and", "chemistry", "it", "has", "never", "been", "trained", "on", ".", "we", "further", "show", "that", "improved", "performances", "may", "be", "obtained", "by", "selecting", "the", "most", "challenging", "distractors", "(", "wrong", "answers", ")", ",", "with", "a", "dedicated", "ranker", "based", "on", "a", "pretrained", "roberta", "model", "."]}, {"venue": "ACL", "title": "TXtract: Taxonomy-Aware Knowledge Extraction for Thousands of Product Categories", "abstract": "Extracting structured knowledge from product profiles is crucial for various applications in e-Commerce. 
State-of-the-art approaches for knowledge extraction were each designed for a single category of product, and thus do not apply to real-life e-Commerce scenarios, which often contain thousands of diverse categories. This paper proposes TXtract, a taxonomy-aware knowledge extraction model that applies to thousands of product categories organized in a hierarchical taxonomy. Through category conditional self-attention and multi-task learning, our approach is both scalable, as it trains a single model for thousands of categories, and effective, as it extracts category-specific attribute values. Experiments on products from a taxonomy with 4,000 categories show that TXtract outperforms state-of-the-art approaches by up to 10% in F1 and 15% in coverage across all categories.", "doc_id": "5851e0ff9bba4953c3cbd804f51f675c", "publication_year": 2020, "sentences": ["extracting structured knowledge from product profiles is crucial for various applications in e - commerce .", "state - of - the - art approaches for knowledge extraction were each designed for a single category of product , and thus do not apply to real - life e - commerce scenarios , which often contain thousands of diverse categories .", "this paper proposes txtract , a taxonomy - aware knowledge extraction model that applies to thousands of product categories organized in a hierarchical taxonomy .", "through category conditional self - attention and multi - task learning , our approach is both scalable , as it trains a single model for thousands of categories , and effective , as it extracts category - specific attribute values .", "experiments on products from a taxonomy with 4 , 000 categories show that txtract outperforms state - of - the - art approaches by up to 10 % in f1 and 15 % in coverage across all categories ."], "events": [{"event_type": "ITT", "arguments": [{"text": "knowledge extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["knowledge", "extraction"], "offsets": [25, 
26]}], "trigger": {"text": "designed", "tokens": ["designed"], "offsets": [29]}}, {"event_type": "PRP", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Content", "tokens": ["txtract"], "offsets": [62]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [61]}}, {"event_type": "FAC", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["txtract"], "offsets": [62]}], "trigger": {"text": "scalable", "tokens": ["scalable"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["txtract"], "offsets": [62]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [114]}}, {"event_type": "FAC", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["txtract"], "offsets": [62]}, {"text": "single model", "nugget_type": "APP", "argument_type": "Object", "tokens": ["single", "model"], "offsets": [106, 107]}, {"text": "thousands of categories", "nugget_type": "STR", "argument_type": "Target", "tokens": ["thousands", "of", "categories"], "offsets": [109, 110, 111]}], "trigger": {"text": "trains", "tokens": ["trains"], "offsets": [104]}}, {"event_type": "CMP", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["txtract"], "offsets": [62]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [139]}, {"text": "state - of - the - art approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "approaches"], "offsets": [140, 141, 142, 143, 144, 145, 146, 147]}, {"text": "10 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["10", "%"], "offsets": [151, 152]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [154]}, {"text": "15 %", "nugget_type": "DEG", 
"argument_type": "Extent", "tokens": ["15", "%"], "offsets": [156, 157]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [139]}}, {"event_type": "FAC", "arguments": [{"text": "txtract", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["txtract"], "offsets": [62]}, {"text": "category - specific attribute values", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["category", "-", "specific", "attribute", "values"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "extracts", "tokens": ["extracts"], "offsets": [118]}}], "document": ["extracting", "structured", "knowledge", "from", "product", "profiles", "is", "crucial", "for", "various", "applications", "in", "e", "-", "commerce", ".", "state", "-", "of", "-", "the", "-", "art", "approaches", "for", "knowledge", "extraction", "were", "each", "designed", "for", "a", "single", "category", "of", "product", ",", "and", "thus", "do", "not", "apply", "to", "real", "-", "life", "e", "-", "commerce", "scenarios", ",", "which", "often", "contain", "thousands", "of", "diverse", "categories", ".", "this", "paper", "proposes", "txtract", ",", "a", "taxonomy", "-", "aware", "knowledge", "extraction", "model", "that", "applies", "to", "thousands", "of", "product", "categories", "organized", "in", "a", "hierarchical", "taxonomy", ".", "through", "category", "conditional", "self", "-", "attention", "and", "multi", "-", "task", "learning", ",", "our", "approach", "is", "both", "scalable", ",", "as", "it", "trains", "a", "single", "model", "for", "thousands", "of", "categories", ",", "and", "effective", ",", "as", "it", "extracts", "category", "-", "specific", "attribute", "values", ".", "experiments", "on", "products", "from", "a", "taxonomy", "with", "4", ",", "000", "categories", "show", "that", "txtract", "outperforms", "state", "-", "of", "-", "the", "-", "art", "approaches", "by", "up", "to", "10", "%", "in", "f1", "and", "15", "%", "in", "coverage", "across", "all", 
"categories", "."]}, {"venue": "ACL", "title": "Cross-modal Memory Networks for Radiology Report Generation", "abstract": "Medical imaging plays a significant role in clinical practice of medical diagnosis, where the text reports of the images are essential in understanding them and facilitating later treatments. By generating the reports automatically, it is beneficial to help lighten the burden of radiologists and significantly promote clinical automation, which already attracts much attention in applying artificial intelligence to medical domain. Previous studies mainly follow the encoder-decoder paradigm and focus on the aspect of text generation, with few studies considering the importance of cross-modal mappings and explicitly exploit such mappings to facilitate radiology report generation. In this paper, we propose a cross-modal memory networks (CMN) to enhance the encoder-decoder framework for radiology report generation, where a shared memory is designed to record the alignment between images and texts so as to facilitate the interaction and generation across modalities. Experimental results illustrate the effectiveness of our proposed model, where state-of-the-art performance is achieved on two widely used benchmark datasets, i.e., IU X-Ray and MIMIC-CXR. 
Further analyses also prove that our model is able to better align information from radiology images and texts so as to help generating more accurate reports in terms of clinical indicators.", "doc_id": "b42ff9164407b48e7731684640e8356b", "publication_year": 2021, "sentences": ["medical imaging plays a significant role in clinical practice of medical diagnosis , where the text reports of the images are essential in understanding them and facilitating later treatments .", "by generating the reports automatically , it is beneficial to help lighten the burden of radiologists and significantly promote clinical automation , which already attracts much attention in applying artificial intelligence to medical domain .", "previous studies mainly follow the encoder - decoder paradigm and focus on the aspect of text generation , with few studies considering the importance of cross - modal mappings and explicitly exploit such mappings to facilitate radiology report generation .", "in this paper , we propose a cross - modal memory networks ( cmn ) to enhance the encoder - decoder framework for radiology report generation , where a shared memory is designed to record the alignment between images and texts so as to facilitate the interaction and generation across modalities .", "experimental results illustrate the effectiveness of our proposed model , where state - of - the - art performance is achieved on two widely used benchmark datasets , i . e . 
, iu x - ray and mimic - cxr .", "further analyses also prove that our model is able to better align information from radiology images and texts so as to help generating more accurate reports in terms of clinical indicators ."], "events": [{"event_type": "ITT", "arguments": [{"text": "medical imaging", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["medical", "imaging"], "offsets": [0, 1]}], "trigger": {"text": "plays", "tokens": ["plays"], "offsets": [2]}}, {"event_type": "RWS", "arguments": [{"text": "reports", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["reports"], "offsets": [33]}, {"text": "lighten", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["lighten"], "offsets": [41]}, {"text": "promote", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["promote"], "offsets": [48]}, {"text": "automatically", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["automatically"], "offsets": [34]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [31]}}, {"event_type": "PUR", "arguments": [{"text": "burden of radiologists", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["burden", "of", "radiologists"], "offsets": [43, 44, 45]}], "trigger": {"text": "lighten", "tokens": ["lighten"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "clinical automation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["clinical", "automation"], "offsets": [49, 50]}], "trigger": {"text": "promote", "tokens": ["promote"], "offsets": [48]}}, {"event_type": "RWF", "arguments": [{"text": "few studies", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["few", "studies"], "offsets": [84, 85]}, {"text": "cross - modal mappings", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["cross", "-", "modal", "mappings"], "offsets": [90, 91, 92, 93]}], "trigger": {"text": "considering", "tokens": ["considering"], "offsets": [86]}}, {"event_type": "RWF", 
"arguments": [{"text": "few studies", "nugget_type": "WEA", "argument_type": "Concern", "tokens": ["few", "studies"], "offsets": [84, 85]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [100]}, {"text": "mappings", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["mappings"], "offsets": [98]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "radiology report generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["radiology", "report", "generation"], "offsets": [101, 102, 103]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [100]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}, {"text": "cross - modal memory networks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cross", "-", "modal", "memory", "networks"], "offsets": [112, 113, 114, 115, 116]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [121]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "encoder - decoder framework", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["encoder", "-", "decoder", "framework"], "offsets": [123, 124, 125, 126]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [121]}}, {"event_type": "MDS", "arguments": [{"text": "shared memory", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["shared", "memory"], "offsets": [134, 135]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [149]}, {"text": "alignment between images and texts", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["alignment", "between", "images", "and", "texts"], "offsets": [141, 
142, 143, 144, 145]}], "trigger": {"text": "record", "tokens": ["record"], "offsets": [139]}}, {"event_type": "PUR", "arguments": [{"text": "interaction across modalities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interaction", "across", "modalities"], "offsets": [151, 154, 155]}, {"text": "generation across modalities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["generation", "across", "modalities"], "offsets": [153, 154, 155]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of our proposed model", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "our", "proposed", "model"], "offsets": [161, 162, 163, 164, 165]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [159]}}, {"event_type": "FAC", "arguments": [{"text": "cross - modal memory networks", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cross", "-", "modal", "memory", "networks"], "offsets": [112, 113, 114, 115, 116]}, {"text": "iu x - ray", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["iu", "x", "-", "ray"], "offsets": [190, 191, 192, 193]}, {"text": "mimic - cxr", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mimic", "-", "cxr"], "offsets": [195, 196, 197]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [168, 169, 170, 171, 172, 173, 174, 175]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [177]}}, {"event_type": "FIN", "arguments": [{"text": "better align", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["better", "align"], "offsets": [209, 210]}], "trigger": {"text": "prove", "tokens": ["prove"], "offsets": [202]}}, {"event_type": "FAC", "arguments": [{"text": "cross - modal memory networks", "nugget_type": 
"APP", "argument_type": "Subject", "tokens": ["cross", "-", "modal", "memory", "networks"], "offsets": [112, 113, 114, 115, 116]}, {"text": "information from radiology images and texts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["information", "from", "radiology", "images", "and", "texts"], "offsets": [211, 212, 213, 214, 215, 216]}, {"text": "generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [221]}], "trigger": {"text": "better align", "tokens": ["better", "align"], "offsets": [209, 210]}}, {"event_type": "PUR", "arguments": [{"text": "more accurate reports", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "accurate", "reports"], "offsets": [222, 223, 224]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [221]}}], "document": ["medical", "imaging", "plays", "a", "significant", "role", "in", "clinical", "practice", "of", "medical", "diagnosis", ",", "where", "the", "text", "reports", "of", "the", "images", "are", "essential", "in", "understanding", "them", "and", "facilitating", "later", "treatments", ".", "by", "generating", "the", "reports", "automatically", ",", "it", "is", "beneficial", "to", "help", "lighten", "the", "burden", "of", "radiologists", "and", "significantly", "promote", "clinical", "automation", ",", "which", "already", "attracts", "much", "attention", "in", "applying", "artificial", "intelligence", "to", "medical", "domain", ".", "previous", "studies", "mainly", "follow", "the", "encoder", "-", "decoder", "paradigm", "and", "focus", "on", "the", "aspect", "of", "text", "generation", ",", "with", "few", "studies", "considering", "the", "importance", "of", "cross", "-", "modal", "mappings", "and", "explicitly", "exploit", "such", "mappings", "to", "facilitate", "radiology", "report", "generation", ".", "in", "this", "paper", ",", "we", "propose", "a", "cross", "-", "modal", "memory", "networks", "(", "cmn", ")", "to", "enhance", "the", 
"encoder", "-", "decoder", "framework", "for", "radiology", "report", "generation", ",", "where", "a", "shared", "memory", "is", "designed", "to", "record", "the", "alignment", "between", "images", "and", "texts", "so", "as", "to", "facilitate", "the", "interaction", "and", "generation", "across", "modalities", ".", "experimental", "results", "illustrate", "the", "effectiveness", "of", "our", "proposed", "model", ",", "where", "state", "-", "of", "-", "the", "-", "art", "performance", "is", "achieved", "on", "two", "widely", "used", "benchmark", "datasets", ",", "i", ".", "e", ".", ",", "iu", "x", "-", "ray", "and", "mimic", "-", "cxr", ".", "further", "analyses", "also", "prove", "that", "our", "model", "is", "able", "to", "better", "align", "information", "from", "radiology", "images", "and", "texts", "so", "as", "to", "help", "generating", "more", "accurate", "reports", "in", "terms", "of", "clinical", "indicators", "."]}, {"venue": "ACL", "title": "Word2Sense: Sparse Interpretable Word Embeddings", "abstract": "We present an unsupervised method to generate Word2Sense word embeddings that are interpretable \u2014 each dimension of the embedding space corresponds to a fine-grained sense, and the non-negative value of the embedding along the j-th dimension represents the relevance of the j-th sense to the word. The underlying LDA-based generative model can be extended to refine the representation of a polysemous word in a short context, allowing us to use the embedings in contextual tasks. On computational NLP tasks, Word2Sense embeddings compare well with other word embeddings generated by unsupervised methods. Across tasks such as word similarity, entailment, sense induction, and contextual interpretation, Word2Sense is competitive with the state-of-the-art method for that task. 
Word2Sense embeddings are at least as sparse and fast to compute as prior art.", "doc_id": "41bc51897b3d73ac08a08ae9e4aa5761", "publication_year": 2019, "sentences": ["we present an unsupervised method to generate word2sense word embeddings that are interpretable \u2014 each dimension of the embedding space corresponds to a fine - grained sense , and the non - negative value of the embedding along the j - th dimension represents the relevance of the j - th sense to the word .", "the underlying lda - based generative model can be extended to refine the representation of a polysemous word in a short context , allowing us to use the embedings in contextual tasks .", "on computational nlp tasks , word2sense embeddings compare well with other word embeddings generated by unsupervised methods .", "across tasks such as word similarity , entailment , sense induction , and contextual interpretation , word2sense is competitive with the state - of - the - art method for that task .", "word2sense embeddings are at least as sparse and fast to compute as prior art ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "unsupervised method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unsupervised", "method"], "offsets": [3, 4]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [6]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "word2sense word embeddings", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["word2sense", "word", "embeddings"], "offsets": [7, 8, 9]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [6]}}, {"event_type": "MDS", "arguments": [{"text": "underlying lda - based generative model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["underlying", "lda", "-", 
"based", "generative", "model"], "offsets": [57, 58, 59, 60, 61, 62]}, {"text": "refine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["refine"], "offsets": [67]}], "trigger": {"text": "extended", "tokens": ["extended"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "representation of a polysemous word in a short context", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["representation", "of", "a", "polysemous", "word", "in", "a", "short", "context"], "offsets": [69, 70, 71, 72, 73, 74, 75, 76, 77]}], "trigger": {"text": "refine", "tokens": ["refine"], "offsets": [67]}}, {"event_type": "CMP", "arguments": [{"text": "word2sense embeddings", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["word2sense", "embeddings"], "offsets": [94, 95]}, {"text": "well", "nugget_type": "STR", "argument_type": "Result", "tokens": ["well"], "offsets": [97]}, {"text": "other word embeddings generated by unsupervised methods", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["other", "word", "embeddings", "generated", "by", "unsupervised", "methods"], "offsets": [99, 100, 101, 102, 103, 104, 105]}, {"text": "on computational nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "computational", "nlp", "tasks"], "offsets": [89, 90, 91, 92]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [96]}}, {"event_type": "CMP", "arguments": [{"text": "word2sense", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["word2sense"], "offsets": [123]}, {"text": "state - of - the - art method", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "method"], "offsets": [128, 129, 130, 131, 132, 133, 134, 135]}, {"text": "across tasks such as word similarity , entailment , sense induction , and contextual interpretation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "tasks", "such", "as", "word", "similarity", ",", 
"entailment", ",", "sense", "induction", ",", "and", "contextual", "interpretation"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [125]}}, {"event_type": "CMP", "arguments": [{"text": "word2sense embeddings", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["word2sense", "embeddings"], "offsets": [140, 141]}, {"text": "prior art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["prior", "art"], "offsets": [152, 153]}, {"text": "sparse", "nugget_type": "STR", "argument_type": "Result", "tokens": ["sparse"], "offsets": [146]}, {"text": "fast", "nugget_type": "STR", "argument_type": "Result", "tokens": ["fast"], "offsets": [148]}, {"text": "least", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["least"], "offsets": [144]}], "trigger": {"text": "compute", "tokens": ["compute"], "offsets": [150]}}], "document": ["we", "present", "an", "unsupervised", "method", "to", "generate", "word2sense", "word", "embeddings", "that", "are", "interpretable", "\u2014", "each", "dimension", "of", "the", "embedding", "space", "corresponds", "to", "a", "fine", "-", "grained", "sense", ",", "and", "the", "non", "-", "negative", "value", "of", "the", "embedding", "along", "the", "j", "-", "th", "dimension", "represents", "the", "relevance", "of", "the", "j", "-", "th", "sense", "to", "the", "word", ".", "the", "underlying", "lda", "-", "based", "generative", "model", "can", "be", "extended", "to", "refine", "the", "representation", "of", "a", "polysemous", "word", "in", "a", "short", "context", ",", "allowing", "us", "to", "use", "the", "embedings", "in", "contextual", "tasks", ".", "on", "computational", "nlp", "tasks", ",", "word2sense", "embeddings", "compare", "well", "with", "other", "word", "embeddings", "generated", "by", "unsupervised", "methods", ".", "across", "tasks", "such", "as", "word", "similarity", ",", "entailment", ",", 
"sense", "induction", ",", "and", "contextual", "interpretation", ",", "word2sense", "is", "competitive", "with", "the", "state", "-", "of", "-", "the", "-", "art", "method", "for", "that", "task", ".", "word2sense", "embeddings", "are", "at", "least", "as", "sparse", "and", "fast", "to", "compute", "as", "prior", "art", "."]}, {"venue": "ACL", "title": "SenseBERT: Driving Some Sense into BERT", "abstract": "The ability to learn from large unlabeled corpora has allowed neural language models to advance the frontier in natural language understanding. However, existing self-supervision techniques operate at the word form level, which serves as a surrogate for the underlying semantic content. This paper proposes a method to employ weak-supervision directly at the word sense level. Our model, named SenseBERT, is pre-trained to predict not only the masked words but also their WordNet supersenses. Accordingly, we attain a lexical-semantic level language model, without the use of human annotation. 
SenseBERT achieves significantly improved lexical understanding, as we demonstrate by experimenting on SemEval Word Sense Disambiguation, and by attaining a state of the art result on the \u2018Word in Context\u2019 task.", "doc_id": "350fdd654080714ba5fa403c22fef3d6", "publication_year": 2020, "sentences": ["the ability to learn from large unlabeled corpora has allowed neural language models to advance the frontier in natural language understanding .", "however , existing self - supervision techniques operate at the word form level , which serves as a surrogate for the underlying semantic content .", "this paper proposes a method to employ weak - supervision directly at the word sense level .", "our model , named sensebert , is pre - trained to predict not only the masked words but also their wordnet supersenses .", "accordingly , we attain a lexical - semantic level language model , without the use of human annotation .", "sensebert achieves significantly improved lexical understanding , as we demonstrate by experimenting on semeval word sense disambiguation , and by attaining a state of the art result on the \u2018 word in context \u2019 task ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "understanding"], "offsets": [18, 19, 20]}], "trigger": {"text": "advance", "tokens": ["advance"], "offsets": [14]}}, {"event_type": "PRP", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [51]}, {"text": "employ", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["employ"], "offsets": [53]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [49]}}, {"event_type": "PUR", "arguments": [{"text": "weak - supervision", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["weak", "-", "supervision"], "offsets": [54, 55, 56]}, {"text": "at the word sense 
level", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "the", "word", "sense", "level"], "offsets": [58, 59, 60, 61, 62]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [53]}}, {"event_type": "WKS", "arguments": [{"text": "sensebert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["sensebert"], "offsets": [68]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [75]}], "trigger": {"text": "pre - trained", "tokens": ["pre", "-", "trained"], "offsets": [71, 72, 73]}}, {"event_type": "PUR", "arguments": [{"text": "not only the masked words but also their wordnet supersenses", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["not", "only", "the", "masked", "words", "but", "also", "their", "wordnet", "supersenses"], "offsets": [76, 77, 78, 79, 80, 81, 82, 83, 84, 85]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [75]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "lexical - semantic level language model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["lexical", "-", "semantic", "level", "language", "model"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "without the use of human annotation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "the", "use", "of", "human", "annotation"], "offsets": [99, 100, 101, 102, 103, 104]}], "trigger": {"text": "attain", "tokens": ["attain"], "offsets": [90]}}, {"event_type": "FAC", "arguments": [{"text": "sensebert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["sensebert"], "offsets": [106]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [108]}, {"text": "improved lexical understanding", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["improved", "lexical", 
"understanding"], "offsets": [109, 110, 111]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "semeval word sense disambiguation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["semeval", "word", "sense", "disambiguation"], "offsets": [119, 120, 121, 122]}], "trigger": {"text": "experimenting", "tokens": ["experimenting"], "offsets": [117]}}], "document": ["the", "ability", "to", "learn", "from", "large", "unlabeled", "corpora", "has", "allowed", "neural", "language", "models", "to", "advance", "the", "frontier", "in", "natural", "language", "understanding", ".", "however", ",", "existing", "self", "-", "supervision", "techniques", "operate", "at", "the", "word", "form", "level", ",", "which", "serves", "as", "a", "surrogate", "for", "the", "underlying", "semantic", "content", ".", "this", "paper", "proposes", "a", "method", "to", "employ", "weak", "-", "supervision", "directly", "at", "the", "word", "sense", "level", ".", "our", "model", ",", "named", "sensebert", ",", "is", "pre", "-", "trained", "to", "predict", "not", "only", "the", "masked", "words", "but", "also", "their", "wordnet", "supersenses", ".", "accordingly", ",", "we", "attain", "a", "lexical", "-", "semantic", "level", "language", "model", ",", "without", "the", "use", "of", "human", "annotation", ".", "sensebert", "achieves", "significantly", "improved", "lexical", "understanding", ",", "as", "we", "demonstrate", "by", "experimenting", "on", "semeval", "word", "sense", "disambiguation", ",", "and", "by", "attaining", "a", "state", "of", "the", "art", "result", "on", "the", "\u2018", "word", "in", "context", "\u2019", "task", "."]}, {"venue": "ACL", "title": "N-ary Constituent Tree Parsing with Recursive Semi-Markov Model", "abstract": "In this paper, we study the task of graph-based constituent parsing in 
the setting that binarization is not conducted as a pre-processing step, where a constituent tree may consist of nodes with more than two children. Previous graph-based methods on this setting typically generate hidden nodes with the dummy label inside the n-ary nodes, in order to transform the tree into a binary tree for prediction. The limitation is that the hidden nodes break the sibling relations of the n-ary node\u2019s children. Consequently, the dependencies of such sibling constituents might not be accurately modeled and is being ignored. To solve this limitation, we propose a novel graph-based framework, which is called \u201crecursive semi-Markov model\u201d. The main idea is to utilize 1-order semi-Markov model to predict the immediate children sequence of a constituent candidate, which then recursively serves as a child candidate of its parent. In this manner, the dependencies of sibling constituents can be described by 1-order transition features, which solves the above limitation. Through experiments, the proposed framework obtains the F1 of 95.92% and 92.50% on the datasets of PTB and CTB 5.1 respectively. 
Specially, the recursive semi-Markov model shows advantages in modeling nodes with more than two children, whose average F1 can be improved by 0.3-1.1 points in PTB and 2.3-6.8 points in CTB 5.1.", "doc_id": "004b02f1622f97acc00647870a409303", "publication_year": 2021, "sentences": ["in this paper , we study the task of graph - based constituent parsing in the setting that binarization is not conducted as a pre - processing step , where a constituent tree may consist of nodes with more than two children .", "previous graph - based methods on this setting typically generate hidden nodes with the dummy label inside the n - ary nodes , in order to transform the tree into a binary tree for prediction .", "the limitation is that the hidden nodes break the sibling relations of the n - ary node \u2019 s children .", "consequently , the dependencies of such sibling constituents might not be accurately modeled and is being ignored .", "to solve this limitation , we propose a novel graph - based framework , which is called \u201c recursive semi - markov model \u201d .", "the main idea is to utilize 1 - order semi - markov model to predict the immediate children sequence of a constituent candidate , which then recursively serves as a child candidate of its parent .", "in this manner , the dependencies of sibling constituents can be described by 1 - order transition features , which solves the above limitation .", "through experiments , the proposed framework obtains the f1 of 95 . 92 % and 92 . 50 % on the datasets of ptb and ctb 5 . 1 respectively .", "specially , the recursive semi - markov model shows advantages in modeling nodes with more than two children , whose average f1 can be improved by 0 . 3 - 1 . 1 points in ptb and 2 . 3 - 6 . 8 points in ctb 5 . 
1 ."], "events": [{"event_type": "ITT", "arguments": [{"text": "graph - based constituent parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["graph", "-", "based", "constituent", "parsing"], "offsets": [9, 10, 11, 12, 13]}, {"text": "in the setting that binarization", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "setting", "that", "binarization"], "offsets": [14, 15, 16, 17, 18]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "previous graph - based methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "graph", "-", "based", "methods"], "offsets": [43, 44, 45, 46, 47]}, {"text": "hidden nodes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["hidden", "nodes"], "offsets": [53, 54]}, {"text": "dummy label inside the n - ary nodes", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["dummy", "label", "inside", "the", "n", "-", "ary", "nodes"], "offsets": [57, 58, 59, 60, 61, 62, 63, 64]}, {"text": "transform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["transform"], "offsets": [69]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "tree", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["tree"], "offsets": [71]}], "trigger": {"text": "transform", "tokens": ["transform"], "offsets": [69]}}, {"event_type": "RWF", "arguments": [{"text": "break", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["break"], "offsets": [86]}, {"text": "hidden nodes", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["hidden", "nodes"], "offsets": [84, 85]}], "trigger": {"text": "break", "tokens": ["break"], "offsets": [86]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [123]}, {"text": "graph - based 
framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graph", "-", "based", "framework"], "offsets": [127, 128, 129, 130]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "1 - order semi - markov model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["1", "-", "order", "semi", "-", "markov", "model"], "offsets": [149, 150, 151, 152, 153, 154, 155]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [157]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [148]}}, {"event_type": "PUR", "arguments": [{"text": "immediate children sequence of a constituent candidate", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["immediate", "children", "sequence", "of", "a", "constituent", "candidate"], "offsets": [159, 160, 161, 162, 163, 164, 165]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [157]}}, {"event_type": "MDS", "arguments": [{"text": "child candidate of its parent", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["child", "candidate", "of", "its", "parent"], "offsets": [173, 174, 175, 176, 177]}], "trigger": {"text": "recursively serves", "tokens": ["recursively", "serves"], "offsets": [169, 170]}}, {"event_type": "MDS", "arguments": [{"text": "dependencies of sibling constituents", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dependencies", "of", "sibling", "constituents"], "offsets": [184, 185, 186, 187]}, {"text": "1 - order transition features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["1", "-", "order", "transition", "features"], "offsets": [192, 193, 194, 195, 196]}], "trigger": {"text": "described", "tokens": ["described"], "offsets": [190]}}, {"event_type": "FAC", "arguments": [{"text": "graph - based framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["graph", "-", 
"based", "framework"], "offsets": [127, 128, 129, 130]}, {"text": "95 . 92 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["95", ".", "92", "%"], "offsets": [214, 215, 216, 217]}, {"text": "ptb", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ptb"], "offsets": [227]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["f1"], "offsets": [212]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [210]}}, {"event_type": "FAC", "arguments": [{"text": "graph - based framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["graph", "-", "based", "framework"], "offsets": [127, 128, 129, 130]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["f1"], "offsets": [212]}, {"text": "92 . 50 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["92", ".", "50", "%"], "offsets": [219, 220, 221, 222]}, {"text": "ctb 5 . 1", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ctb", "5", ".", "1"], "offsets": [229, 230, 231, 232]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [210]}}, {"event_type": "CMP", "arguments": [{"text": "recursive semi - markov model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["recursive", "semi", "-", "markov", "model"], "offsets": [238, 239, 240, 241, 242]}, {"text": "improved", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improved"], "offsets": [259]}, {"text": "average f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["average", "f1"], "offsets": [255, 256]}, {"text": "ptb", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ptb"], "offsets": [270]}], "trigger": {"text": "improved", "tokens": ["improved"], "offsets": [259]}}, {"event_type": "CMP", "arguments": [{"text": "recursive semi - markov model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["recursive", "semi", "-", "markov", "model"], "offsets": [238, 239, 240, 241, 242]}, 
{"text": "improved", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improved"], "offsets": [259]}, {"text": "average f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["average", "f1"], "offsets": [255, 256]}, {"text": "2 . 3 - 6 . 8 points", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", ".", "3", "-", "6", ".", "8", "points"], "offsets": [272, 273, 274, 275, 276, 277, 278, 279]}], "trigger": {"text": "improved", "tokens": ["improved"], "offsets": [259]}}, {"event_type": "FAC", "arguments": [{"text": "recursive semi - markov model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recursive", "semi", "-", "markov", "model"], "offsets": [238, 239, 240, 241, 242]}, {"text": "advantages in modeling nodes with more than two children", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["advantages", "in", "modeling", "nodes", "with", "more", "than", "two", "children"], "offsets": [244, 245, 246, 247, 248, 249, 250, 251, 252]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [243]}}], "document": ["in", "this", "paper", ",", "we", "study", "the", "task", "of", "graph", "-", "based", "constituent", "parsing", "in", "the", "setting", "that", "binarization", "is", "not", "conducted", "as", "a", "pre", "-", "processing", "step", ",", "where", "a", "constituent", "tree", "may", "consist", "of", "nodes", "with", "more", "than", "two", "children", ".", "previous", "graph", "-", "based", "methods", "on", "this", "setting", "typically", "generate", "hidden", "nodes", "with", "the", "dummy", "label", "inside", "the", "n", "-", "ary", "nodes", ",", "in", "order", "to", "transform", "the", "tree", "into", "a", "binary", "tree", "for", "prediction", ".", "the", "limitation", "is", "that", "the", "hidden", "nodes", "break", "the", "sibling", "relations", "of", "the", "n", "-", "ary", "node", "\u2019", "s", "children", ".", "consequently", ",", "the", "dependencies", "of", "such", "sibling", 
"constituents", "might", "not", "be", "accurately", "modeled", "and", "is", "being", "ignored", ".", "to", "solve", "this", "limitation", ",", "we", "propose", "a", "novel", "graph", "-", "based", "framework", ",", "which", "is", "called", "\u201c", "recursive", "semi", "-", "markov", "model", "\u201d", ".", "the", "main", "idea", "is", "to", "utilize", "1", "-", "order", "semi", "-", "markov", "model", "to", "predict", "the", "immediate", "children", "sequence", "of", "a", "constituent", "candidate", ",", "which", "then", "recursively", "serves", "as", "a", "child", "candidate", "of", "its", "parent", ".", "in", "this", "manner", ",", "the", "dependencies", "of", "sibling", "constituents", "can", "be", "described", "by", "1", "-", "order", "transition", "features", ",", "which", "solves", "the", "above", "limitation", ".", "through", "experiments", ",", "the", "proposed", "framework", "obtains", "the", "f1", "of", "95", ".", "92", "%", "and", "92", ".", "50", "%", "on", "the", "datasets", "of", "ptb", "and", "ctb", "5", ".", "1", "respectively", ".", "specially", ",", "the", "recursive", "semi", "-", "markov", "model", "shows", "advantages", "in", "modeling", "nodes", "with", "more", "than", "two", "children", ",", "whose", "average", "f1", "can", "be", "improved", "by", "0", ".", "3", "-", "1", ".", "1", "points", "in", "ptb", "and", "2", ".", "3", "-", "6", ".", "8", "points", "in", "ctb", "5", ".", "1", "."]}, {"venue": "ACL", "title": "Automated Concatenation of Embeddings for Structured Prediction", "abstract": "Pretrained contextualized embeddings are powerful word representations for structured prediction tasks. Recent work found that better word representations can be obtained by concatenating different types of embeddings. 
However, the selection of embeddings to form the best concatenated representation usually varies depending on the task and the collection of candidate embeddings, and the ever-increasing number of embedding types makes it a more difficult problem. In this paper, we propose Automated Concatenation of Embeddings (ACE) to automate the process of finding better concatenations of embeddings for structured prediction tasks, based on a formulation inspired by recent progress on neural architecture search. Specifically, a controller alternately samples a concatenation of embeddings, according to its current belief of the effectiveness of individual embedding types in consideration for a task, and updates the belief based on a reward. We follow strategies in reinforcement learning to optimize the parameters of the controller and compute the reward based on the accuracy of a task model, which is fed with the sampled concatenation as input and trained on a task dataset. Empirical results on 6 tasks and 21 datasets show that our approach outperforms strong baselines and achieves state-of-the-art performance with fine-tuned embeddings in all the evaluations.", "doc_id": "40af5d7b5d59f6af0b9e3a5e6d876474", "publication_year": 2021, "sentences": ["pretrained contextualized embeddings are powerful word representations for structured prediction tasks .", "recent work found that better word representations can be obtained by concatenating different types of embeddings .", "however , the selection of embeddings to form the best concatenated representation usually varies depending on the task and the collection of candidate embeddings , and the ever - increasing number of embedding types makes it a more difficult problem .", "in this paper , we propose automated concatenation of embeddings ( ace ) to automate the process of finding better concatenations of embeddings for structured prediction tasks , based on a formulation inspired by recent progress on neural architecture search .", 
"specifically , a controller alternately samples a concatenation of embeddings , according to its current belief of the effectiveness of individual embedding types in consideration for a task , and updates the belief based on a reward .", "we follow strategies in reinforcement learning to optimize the parameters of the controller and compute the reward based on the accuracy of a task model , which is fed with the sampled concatenation as input and trained on a task dataset .", "empirical results on 6 tasks and 21 datasets show that our approach outperforms strong baselines and achieves state - of - the - art performance with fine - tuned embeddings in all the evaluations ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pretrained contextualized embeddings", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["pretrained", "contextualized", "embeddings"], "offsets": [0, 1, 2]}], "trigger": {"text": "powerful word representations", "tokens": ["powerful", "word", "representations"], "offsets": [4, 5, 6]}}, {"event_type": "RWS", "arguments": [{"text": "better word representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["better", "word", "representations"], "offsets": [16, 17, 18]}, {"text": "different types of embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["different", "types", "of", "embeddings"], "offsets": [24, 25, 26, 27]}], "trigger": {"text": "concatenating", "tokens": ["concatenating"], "offsets": [23]}}, {"event_type": "RWF", "arguments": [{"text": "ever - increasing number of embedding types", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ever", "-", "increasing", "number", "of", "embedding", "types"], "offsets": [56, 57, 58, 59, 60, 61, 62]}, {"text": "difficult problem", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficult", "problem"], "offsets": [67, 68]}], "trigger": {"text": "difficult problem", "tokens": ["difficult", "problem"], "offsets": 
[67, 68]}}, {"event_type": "PUR", "arguments": [{"text": "best concatenated representation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["best", "concatenated", "representation"], "offsets": [38, 39, 40]}], "trigger": {"text": "form", "tokens": ["form"], "offsets": [36]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [74]}, {"text": "automated concatenation of embeddings", "nugget_type": "APP", "argument_type": "Content", "tokens": ["automated", "concatenation", "of", "embeddings"], "offsets": [76, 77, 78, 79]}, {"text": "automate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automate"], "offsets": [84]}, {"text": "structured prediction tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["structured", "prediction", "tasks"], "offsets": [94, 95, 96]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [75]}}, {"event_type": "PUR", "arguments": [{"text": "process of finding better concatenations of embeddings", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["process", "of", "finding", "better", "concatenations", "of", "embeddings"], "offsets": [86, 87, 88, 89, 90, 91, 92]}], "trigger": {"text": "automate", "tokens": ["automate"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "belief", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["belief"], "offsets": [143]}, {"text": "based on a reward", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "a", "reward"], "offsets": [144, 145, 146, 147]}], "trigger": {"text": "updates", "tokens": ["updates"], "offsets": [141]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [149]}, {"text": "optimize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["optimize"], "offsets": [156]}, {"text": "strategies", "nugget_type": 
"TAK", "argument_type": "Content", "tokens": ["strategies"], "offsets": [151]}], "trigger": {"text": "follow", "tokens": ["follow"], "offsets": [150]}}, {"event_type": "PUR", "arguments": [{"text": "parameters of the controller", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["parameters", "of", "the", "controller"], "offsets": [158, 159, 160, 161]}], "trigger": {"text": "optimize", "tokens": ["optimize"], "offsets": [156]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [149]}, {"text": "reward", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["reward"], "offsets": [165]}, {"text": "based on the accuracy of a task model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "the", "accuracy", "of", "a", "task", "model"], "offsets": [166, 167, 168, 169, 170, 171, 172, 173]}], "trigger": {"text": "compute", "tokens": ["compute"], "offsets": [163]}}, {"event_type": "MDS", "arguments": [{"text": "task model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["task", "model"], "offsets": [172, 173]}, {"text": "sampled concatenation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["sampled", "concatenation"], "offsets": [180, 181]}, {"text": "input", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["input"], "offsets": [183]}], "trigger": {"text": "fed", "tokens": ["fed"], "offsets": [177]}}, {"event_type": "WKS", "arguments": [{"text": "task dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["task", "dataset"], "offsets": [188, 189]}, {"text": "task model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["task", "model"], "offsets": [172, 173]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [185]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], 
"offsets": [203]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [199]}}, {"event_type": "CMP", "arguments": [{"text": "automated concatenation of embeddings", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["automated", "concatenation", "of", "embeddings"], "offsets": [76, 77, 78, 79]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [203]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [203]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [208, 209, 210, 211, 212, 213, 214, 215]}, {"text": "automated concatenation of embeddings", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["automated", "concatenation", "of", "embeddings"], "offsets": [76, 77, 78, 79]}, {"text": "with fine - tuned embeddings in all the evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "fine", "-", "tuned", "embeddings", "in", "all", "the", "evaluations"], "offsets": [216, 217, 218, 219, 220, 221, 222, 223, 224]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [207]}}], "document": ["pretrained", "contextualized", "embeddings", "are", "powerful", "word", "representations", "for", "structured", "prediction", "tasks", ".", "recent", "work", "found", "that", "better", "word", "representations", "can", "be", "obtained", "by", "concatenating", "different", "types", "of", "embeddings", ".", "however", ",", "the", "selection", "of", "embeddings", "to", "form", "the", "best", "concatenated", "representation", "usually", "varies", "depending", "on", "the", "task", "and", "the", "collection", "of", "candidate", "embeddings", ",", "and", "the", "ever", "-", "increasing", "number", "of", "embedding", "types", "makes", "it", "a", "more", "difficult", "problem", 
".", "in", "this", "paper", ",", "we", "propose", "automated", "concatenation", "of", "embeddings", "(", "ace", ")", "to", "automate", "the", "process", "of", "finding", "better", "concatenations", "of", "embeddings", "for", "structured", "prediction", "tasks", ",", "based", "on", "a", "formulation", "inspired", "by", "recent", "progress", "on", "neural", "architecture", "search", ".", "specifically", ",", "a", "controller", "alternately", "samples", "a", "concatenation", "of", "embeddings", ",", "according", "to", "its", "current", "belief", "of", "the", "effectiveness", "of", "individual", "embedding", "types", "in", "consideration", "for", "a", "task", ",", "and", "updates", "the", "belief", "based", "on", "a", "reward", ".", "we", "follow", "strategies", "in", "reinforcement", "learning", "to", "optimize", "the", "parameters", "of", "the", "controller", "and", "compute", "the", "reward", "based", "on", "the", "accuracy", "of", "a", "task", "model", ",", "which", "is", "fed", "with", "the", "sampled", "concatenation", "as", "input", "and", "trained", "on", "a", "task", "dataset", ".", "empirical", "results", "on", "6", "tasks", "and", "21", "datasets", "show", "that", "our", "approach", "outperforms", "strong", "baselines", "and", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "with", "fine", "-", "tuned", "embeddings", "in", "all", "the", "evaluations", "."]}, {"venue": "ACL", "title": "Compositional Generalization and Natural Language Variation: Can a Semantic Parsing Approach Handle Both?", "abstract": "Sequence-to-sequence models excel at handling natural language variation, but have been shown to struggle with out-of-distribution compositional generalization. This has motivated new specialized architectures with stronger compositional biases, but most of these approaches have only been evaluated on synthetically-generated datasets, which are not representative of natural language variation. 
In this work we ask: can we develop a semantic parsing approach that handles both natural language variation and compositional generalization? To better assess this capability, we propose new train and test splits of non-synthetic datasets. We demonstrate that strong existing approaches do not perform well across a broad set of evaluations. We also propose NQG-T5, a hybrid model that combines a high-precision grammar-based approach with a pre-trained sequence-to-sequence model. It outperforms existing approaches across several compositional generalization challenges on non-synthetic data, while also being competitive with the state-of-the-art on standard evaluations. While still far from solving this problem, our study highlights the importance of diverse evaluations and the open challenge of handling both compositional generalization and natural language variation in semantic parsing.", "doc_id": "3cca7a79d93eb5b9239ce2e385af82c6", "publication_year": 2021, "sentences": ["sequence - to - sequence models excel at handling natural language variation , but have been shown to struggle with out - of - distribution compositional generalization .", "this has motivated new specialized architectures with stronger compositional biases , but most of these approaches have only been evaluated on synthetically - generated datasets , which are not representative of natural language variation .", "in this work we ask : can we develop a semantic parsing approach that handles both natural language variation and compositional generalization ?", "to better assess this capability , we propose new train and test splits of non - synthetic datasets .", "we demonstrate that strong existing approaches do not perform well across a broad set of evaluations .", "we also propose nqg - t5 , a hybrid model that combines a high - precision grammar - based approach with a pre - trained sequence - to - sequence model .", "it outperforms existing approaches across several compositional 
generalization challenges on non - synthetic data , while also being competitive with the state - of - the - art on standard evaluations .", "while still far from solving this problem , our study highlights the importance of diverse evaluations and the open challenge of handling both compositional generalization and natural language variation in semantic parsing ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sequence - to - sequence models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["sequence", "-", "to", "-", "sequence", "models"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "excel", "tokens": ["excel"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "most of these approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["most", "of", "these", "approaches"], "offsets": [40, 41, 42, 43]}, {"text": "synthetically - generated datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetically", "-", "generated", "datasets"], "offsets": [49, 50, 51, 52]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [47]}}, {"event_type": "RWF", "arguments": [{"text": "not representative", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "representative"], "offsets": [56, 57]}], "trigger": {"text": "not representative", "tokens": ["not", "representative"], "offsets": [56, 57]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "new train and test splits of non - synthetic datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["new", "train", "and", "test", "splits", "of", "non", "-", "synthetic", "datasets"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101, 102, 103]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [93]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", 
"tokens": ["we"], "offsets": [105]}, {"text": "not perform well", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "perform", "well"], "offsets": [112, 113, 114]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "strong existing approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["strong", "existing", "approaches"], "offsets": [108, 109, 110]}, {"text": "across a broad set of evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "a", "broad", "set", "of", "evaluations"], "offsets": [115, 116, 117, 118, 119, 120]}], "trigger": {"text": "not perform well", "tokens": ["not", "perform", "well"], "offsets": [112, 113, 114]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [122]}, {"text": "nqg - t5", "nugget_type": "APP", "argument_type": "Content", "tokens": ["nqg", "-", "t5"], "offsets": [125, 126, 127]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "pre - trained sequence - to - sequence model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152]}, {"text": "high - precision grammar - based approach", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["high", "-", "precision", "grammar", "-", "based", "approach"], "offsets": [135, 136, 137, 138, 139, 140, 141]}], "trigger": {"text": "combines", "tokens": ["combines"], "offsets": [133]}}, {"event_type": "CMP", "arguments": [{"text": "nqg - t5", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["nqg", "-", "t5"], "offsets": [125, 126, 127]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], 
"offsets": [155]}, {"text": "existing approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "approaches"], "offsets": [156, 157]}, {"text": "across several compositional generalization challenges", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "several", "compositional", "generalization", "challenges"], "offsets": [158, 159, 160, 161, 162]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [155]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [175, 176, 177, 178, 179, 180, 181]}, {"text": "on standard evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "standard", "evaluations"], "offsets": [182, 183, 184]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [172]}}], "document": ["sequence", "-", "to", "-", "sequence", "models", "excel", "at", "handling", "natural", "language", "variation", ",", "but", "have", "been", "shown", "to", "struggle", "with", "out", "-", "of", "-", "distribution", "compositional", "generalization", ".", "this", "has", "motivated", "new", "specialized", "architectures", "with", "stronger", "compositional", "biases", ",", "but", "most", "of", "these", "approaches", "have", "only", "been", "evaluated", "on", "synthetically", "-", "generated", "datasets", ",", "which", "are", "not", "representative", "of", "natural", "language", "variation", ".", "in", "this", "work", "we", "ask", ":", "can", "we", "develop", "a", "semantic", "parsing", "approach", "that", "handles", "both", "natural", "language", "variation", "and", "compositional", "generalization", "?", "to", "better", "assess", "this", "capability", ",", "we", "propose", "new", "train", "and", "test", "splits", "of", "non", "-", "synthetic", "datasets", ".", "we", "demonstrate", "that", "strong", "existing", 
"approaches", "do", "not", "perform", "well", "across", "a", "broad", "set", "of", "evaluations", ".", "we", "also", "propose", "nqg", "-", "t5", ",", "a", "hybrid", "model", "that", "combines", "a", "high", "-", "precision", "grammar", "-", "based", "approach", "with", "a", "pre", "-", "trained", "sequence", "-", "to", "-", "sequence", "model", ".", "it", "outperforms", "existing", "approaches", "across", "several", "compositional", "generalization", "challenges", "on", "non", "-", "synthetic", "data", ",", "while", "also", "being", "competitive", "with", "the", "state", "-", "of", "-", "the", "-", "art", "on", "standard", "evaluations", ".", "while", "still", "far", "from", "solving", "this", "problem", ",", "our", "study", "highlights", "the", "importance", "of", "diverse", "evaluations", "and", "the", "open", "challenge", "of", "handling", "both", "compositional", "generalization", "and", "natural", "language", "variation", "in", "semantic", "parsing", "."]}, {"venue": "ACL", "title": "CLIP: A Dataset for Extracting Action Items for Physicians from Hospital Discharge Notes", "abstract": "Continuity of care is crucial to ensuring positive health outcomes for patients discharged from an inpatient hospital setting, and improved information sharing can help. To share information, caregivers write discharge notes containing action items to share with patients and their future caregivers, but these action items are easily lost due to the lengthiness of the documents. In this work, we describe our creation of a dataset of clinical action items annotated over MIMIC-III, the largest publicly available dataset of real clinical notes. This dataset, which we call CLIP, is annotated by physicians and covers 718 documents representing 100K sentences. We describe the task of extracting the action items from these documents as multi-aspect extractive summarization, with each aspect representing a type of action to be taken. 
We evaluate several machine learning models on this task, and show that the best models exploit in-domain language model pre-training on 59K unannotated documents, and incorporate context from neighboring sentences. We also propose an approach to pre-training data selection that allows us to explore the trade-off between size and domain-specificity of pre-training datasets for this task.", "doc_id": "70be45ce8f6edf62ffa8c3d96c89d31b", "publication_year": 2021, "sentences": ["continuity of care is crucial to ensuring positive health outcomes for patients discharged from an inpatient hospital setting , and improved information sharing can help .", "to share information , caregivers write discharge notes containing action items to share with patients and their future caregivers , but these action items are easily lost due to the lengthiness of the documents .", "in this work , we describe our creation of a dataset of clinical action items annotated over mimic - iii , the largest publicly available dataset of real clinical notes .", "this dataset , which we call clip , is annotated by physicians and covers 718 documents representing 100k sentences .", "we describe the task of extracting the action items from these documents as multi - aspect extractive summarization , with each aspect representing a type of action to be taken .", "we evaluate several machine learning models on this task , and show that the best models exploit in - domain language model pre - training on 59k unannotated documents , and incorporate context from neighboring sentences .", "we also propose an approach to pre - training data selection that allows us to explore the trade - off between size and domain - specificity of pre - training datasets for this task ."], "events": [{"event_type": "RWF", "arguments": [{"text": "easily lost", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["easily", "lost"], "offsets": [51, 52]}, {"text": "lengthiness of the documents", "nugget_type": "WEA", 
"argument_type": "Fault", "tokens": ["lengthiness", "of", "the", "documents"], "offsets": [56, 57, 58, 59]}], "trigger": {"text": "easily lost", "tokens": ["easily", "lost"], "offsets": [51, 52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "a dataset of clinical action items annotated over mimic - iii", "nugget_type": "DST", "argument_type": "Content", "tokens": ["a", "dataset", "of", "clinical", "action", "items", "annotated", "over", "mimic", "-", "iii"], "offsets": [70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80]}], "trigger": {"text": "creation", "tokens": ["creation"], "offsets": [68]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [180]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [184]}, {"text": "pre - training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["pre", "-", "training"], "offsets": [186, 187, 188]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [182]}}, {"event_type": "PUR", "arguments": [{"text": "data selection", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["data", "selection"], "offsets": [189, 190]}], "trigger": {"text": "pre - training", "tokens": ["pre", "-", "training"], "offsets": [186, 187, 188]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [143]}, {"text": "several machine learning models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "machine", "learning", "models"], "offsets": [145, 146, 147, 148]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [144]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [143]}, {"text": "exploit", 
"nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["exploit"], "offsets": [159]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [154]}}, {"event_type": "FAC", "arguments": [{"text": "best models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "models"], "offsets": [157, 158]}, {"text": "in - domain language model pre - training", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["in", "-", "domain", "language", "model", "pre", "-", "training"], "offsets": [160, 161, 162, 163, 164, 165, 166, 167]}, {"text": "59k unannotated documents", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["59k", "unannotated", "documents"], "offsets": [169, 170, 171]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [159]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [112]}, {"text": "task of extracting the action items from these documents as multi - aspect extractive summarization", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task", "of", "extracting", "the", "action", "items", "from", "these", "documents", "as", "multi", "-", "aspect", "extractive", "summarization"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "clip", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["clip"], "offsets": [98]}, {"text": "physicians", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["physicians"], "offsets": [103]}, {"text": "covers 718 documents representing 100k sentences", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["covers", "718", "documents", "representing", "100k", "sentences"], "offsets": [105, 106, 107, 108, 109, 110]}], "trigger": {"text": "annotated", "tokens": ["annotated"], "offsets": [101]}}, 
{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [143]}, {"text": "context from neighboring sentences", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["context", "from", "neighboring", "sentences"], "offsets": [175, 176, 177, 178]}], "trigger": {"text": "incorporate", "tokens": ["incorporate"], "offsets": [174]}}], "document": ["continuity", "of", "care", "is", "crucial", "to", "ensuring", "positive", "health", "outcomes", "for", "patients", "discharged", "from", "an", "inpatient", "hospital", "setting", ",", "and", "improved", "information", "sharing", "can", "help", ".", "to", "share", "information", ",", "caregivers", "write", "discharge", "notes", "containing", "action", "items", "to", "share", "with", "patients", "and", "their", "future", "caregivers", ",", "but", "these", "action", "items", "are", "easily", "lost", "due", "to", "the", "lengthiness", "of", "the", "documents", ".", "in", "this", "work", ",", "we", "describe", "our", "creation", "of", "a", "dataset", "of", "clinical", "action", "items", "annotated", "over", "mimic", "-", "iii", ",", "the", "largest", "publicly", "available", "dataset", "of", "real", "clinical", "notes", ".", "this", "dataset", ",", "which", "we", "call", "clip", ",", "is", "annotated", "by", "physicians", "and", "covers", "718", "documents", "representing", "100k", "sentences", ".", "we", "describe", "the", "task", "of", "extracting", "the", "action", "items", "from", "these", "documents", "as", "multi", "-", "aspect", "extractive", "summarization", ",", "with", "each", "aspect", "representing", "a", "type", "of", "action", "to", "be", "taken", ".", "we", "evaluate", "several", "machine", "learning", "models", "on", "this", "task", ",", "and", "show", "that", "the", "best", "models", "exploit", "in", "-", "domain", "language", "model", "pre", "-", "training", "on", "59k", "unannotated", "documents", ",", "and", "incorporate", 
"context", "from", "neighboring", "sentences", ".", "we", "also", "propose", "an", "approach", "to", "pre", "-", "training", "data", "selection", "that", "allows", "us", "to", "explore", "the", "trade", "-", "off", "between", "size", "and", "domain", "-", "specificity", "of", "pre", "-", "training", "datasets", "for", "this", "task", "."]}, {"venue": "ACL", "title": "Relational Word Embeddings", "abstract": "While word embeddings have been shown to implicitly encode various forms of attributional knowledge, the extent to which they capture relational information is far more limited. In previous work, this limitation has been addressed by incorporating relational knowledge from external knowledge bases when learning the word embedding. Such strategies may not be optimal, however, as they are limited by the coverage of available resources and conflate similarity with other forms of relatedness. As an alternative, in this paper we propose to encode relational knowledge in a separate word embedding, which is aimed to be complementary to a given standard word embedding. This relational word embedding is still learned from co-occurrence statistics, and can thus be used even when no external knowledge base is available. 
Our analysis shows that relational word vectors do indeed capture information that is complementary to what is encoded in standard word embeddings.", "doc_id": "1608f46468f25ca9786b31a549517ae1", "publication_year": 2019, "sentences": ["while word embeddings have been shown to implicitly encode various forms of attributional knowledge , the extent to which they capture relational information is far more limited .", "in previous work , this limitation has been addressed by incorporating relational knowledge from external knowledge bases when learning the word embedding .", "such strategies may not be optimal , however , as they are limited by the coverage of available resources and conflate similarity with other forms of relatedness .", "as an alternative , in this paper we propose to encode relational knowledge in a separate word embedding , which is aimed to be complementary to a given standard word embedding .", "this relational word embedding is still learned from co - occurrence statistics , and can thus be used even when no external knowledge base is available .", "our analysis shows that relational word vectors do indeed capture information that is complementary to what is encoded in standard word embeddings ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word embeddings", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "embeddings"], "offsets": [1, 2]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [26]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [26]}}, {"event_type": "RWS", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "work"], "offsets": [29, 30]}, {"text": "addressed", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["addressed"], "offsets": [36]}, {"text": 
"relational knowledge from external knowledge bases", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relational", "knowledge", "from", "external", "knowledge", "bases"], "offsets": [39, 40, 41, 42, 43, 44]}, {"text": "when learning the word embedding", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "learning", "the", "word", "embedding"], "offsets": [45, 46, 47, 48, 49]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [38]}}, {"event_type": "PUR", "arguments": [{"text": "extent to which they capture relational information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["extent", "to", "which", "they", "capture", "relational", "information"], "offsets": [16, 17, 18, 19, 20, 21, 22]}], "trigger": {"text": "addressed", "tokens": ["addressed"], "offsets": [36]}}, {"event_type": "RWF", "arguments": [{"text": "strategies", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["strategies"], "offsets": [52]}, {"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [63]}, {"text": "coverage of available resources", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["coverage", "of", "available", "resources"], "offsets": [66, 67, 68, 69]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [63]}}, {"event_type": "MDS", "arguments": [{"text": "complementary", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["complementary"], "offsets": [103]}, {"text": "relational knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relational", "knowledge"], "offsets": [90, 91]}, {"text": "separate word embedding", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["separate", "word", "embedding"], "offsets": [94, 95, 96]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [89]}}, {"event_type": "PUR", "arguments": [{"text": "given standard word embedding", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["given", "standard", "word", "embedding"], "offsets": [106, 107, 108, 109]}], "trigger": {"text": "complementary", "tokens": ["complementary"], "offsets": [103]}}, {"event_type": "FIN", "arguments": [{"text": "capture", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["capture"], "offsets": [147]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "information that is complementary to what is encoded in standard word embeddings", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["information", "that", "is", "complementary", "to", "what", "is", "encoded", "in", "standard", "word", "embeddings"], "offsets": [148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159]}, {"text": "relational word vectors", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["relational", "word", "vectors"], "offsets": [142, 143, 144]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [147]}}], "document": ["while", "word", "embeddings", "have", "been", "shown", "to", "implicitly", "encode", "various", "forms", "of", "attributional", "knowledge", ",", "the", "extent", "to", "which", "they", "capture", "relational", "information", "is", "far", "more", "limited", ".", "in", "previous", "work", ",", "this", "limitation", "has", "been", "addressed", "by", "incorporating", "relational", "knowledge", "from", "external", "knowledge", "bases", "when", "learning", "the", "word", "embedding", ".", "such", "strategies", "may", "not", "be", "optimal", ",", "however", ",", "as", "they", "are", "limited", "by", "the", "coverage", "of", "available", "resources", "and", "conflate", "similarity", "with", "other", "forms", "of", "relatedness", ".", "as", "an", "alternative", ",", "in", "this", "paper", "we", "propose", "to", "encode", "relational", "knowledge", "in", "a", "separate", "word", "embedding", ",", "which", "is", "aimed", 
"to", "be", "complementary", "to", "a", "given", "standard", "word", "embedding", ".", "this", "relational", "word", "embedding", "is", "still", "learned", "from", "co", "-", "occurrence", "statistics", ",", "and", "can", "thus", "be", "used", "even", "when", "no", "external", "knowledge", "base", "is", "available", ".", "our", "analysis", "shows", "that", "relational", "word", "vectors", "do", "indeed", "capture", "information", "that", "is", "complementary", "to", "what", "is", "encoded", "in", "standard", "word", "embeddings", "."]}, {"venue": "ACL", "title": "ConditionalQA: A Complex Reading Comprehension Dataset with Conditional Answers", "abstract": "We describe a Question Answering (QA) dataset that contains complex questions with conditional answers, i.e. the answers are only applicable when certain conditions apply. We call this dataset ConditionalQA. In addition to conditional answers, the dataset also features:(1) long context documents with information that is related in logically complex ways;(2) multi-hop questions that require compositional logical reasoning;(3) a combination of extractive questions, yes/no questions, questions with multiple answers, and not-answerable questions;(4) questions asked without knowing the answers.We show that ConditionalQA is challenging for many of the existing QA models, especially in selecting answer conditions. We believe that this dataset will motivate further research in answering complex questions over long documents.", "doc_id": "7714c7b6c4998233b855bffff2d81f71", "publication_year": 2022, "sentences": ["we describe a question answering ( qa ) dataset that contains complex questions with conditional answers , i . e . 
the answers are only applicable when certain conditions apply .", "we call this dataset conditionalqa .", "in addition to conditional answers , the dataset also features : ( 1 ) long context documents with information that is related in logically complex ways ; ( 2 ) multi - hop questions that require compositional logical reasoning ; ( 3 ) a combination of extractive questions , yes / no questions , questions with multiple answers , and not - answerable questions ; ( 4 ) questions asked without knowing the answers .", "we show that conditionalqa is challenging for many of the existing qa models , especially in selecting answer conditions .", "we believe that this dataset will motivate further research in answering complex questions over long documents ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "question answering ( qa ) dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["question", "answering", "dataset"], "offsets": [3, 4, 8]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [1]}}, {"event_type": "FAC", "arguments": [{"text": "conditionalqa", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["conditionalqa"], "offsets": [115]}, {"text": "existing qa models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["existing", "qa", "models"], "offsets": [122, 123, 124]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "conditionalqa", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["conditionalqa"], "offsets": [115]}, {"text": "further research", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["further", "research"], "offsets": [139, 140]}, {"text": "answering", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["answering"], "offsets": [142]}], "trigger": {"text": "motivate", 
"tokens": ["motivate"], "offsets": [138]}}, {"event_type": "PUR", "arguments": [{"text": "complex questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["complex", "questions"], "offsets": [143, 144]}, {"text": "over long documents", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "long", "documents"], "offsets": [145, 146, 147]}], "trigger": {"text": "answering", "tokens": ["answering"], "offsets": [142]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [112]}, {"text": "challenging", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["challenging"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [113]}}], "document": ["we", "describe", "a", "question", "answering", "(", "qa", ")", "dataset", "that", "contains", "complex", "questions", "with", "conditional", "answers", ",", "i", ".", "e", ".", "the", "answers", "are", "only", "applicable", "when", "certain", "conditions", "apply", ".", "we", "call", "this", "dataset", "conditionalqa", ".", "in", "addition", "to", "conditional", "answers", ",", "the", "dataset", "also", "features", ":", "(", "1", ")", "long", "context", "documents", "with", "information", "that", "is", "related", "in", "logically", "complex", "ways", ";", "(", "2", ")", "multi", "-", "hop", "questions", "that", "require", "compositional", "logical", "reasoning", ";", "(", "3", ")", "a", "combination", "of", "extractive", "questions", ",", "yes", "/", "no", "questions", ",", "questions", "with", "multiple", "answers", ",", "and", "not", "-", "answerable", "questions", ";", "(", "4", ")", "questions", "asked", "without", "knowing", "the", "answers", ".", "we", "show", "that", "conditionalqa", "is", "challenging", "for", "many", "of", "the", "existing", "qa", "models", ",", "especially", "in", "selecting", "answer", "conditions", ".", "we", "believe", "that", "this", "dataset", "will", 
"motivate", "further", "research", "in", "answering", "complex", "questions", "over", "long", "documents", "."]}, {"venue": "ACL", "title": "Improving the Similarity Measure of Determinantal Point Processes for Extractive Multi-Document Summarization", "abstract": "The most important obstacles facing multi-document summarization include excessive redundancy in source descriptions and the looming shortage of training data. These obstacles prevent encoder-decoder models from being used directly, but optimization-based methods such as determinantal point processes (DPPs) are known to handle them well. In this paper we seek to strengthen a DPP-based method for extractive multi-document summarization by presenting a novel similarity measure inspired by capsule networks. The approach measures redundancy between a pair of sentences based on surface form and semantic information. We show that our DPP system with improved similarity measure performs competitively, outperforming strong summarization baselines on benchmark datasets. 
Our findings are particularly meaningful for summarizing documents created by multiple authors containing redundant yet lexically diverse expressions.", "doc_id": "ac4d1751268b4453f8ab4da900b8af31", "publication_year": 2019, "sentences": ["the most important obstacles facing multi - document summarization include excessive redundancy in source descriptions and the looming shortage of training data .", "these obstacles prevent encoder - decoder models from being used directly , but optimization - based methods such as determinantal point processes ( dpps ) are known to handle them well .", "in this paper we seek to strengthen a dpp - based method for extractive multi - document summarization by presenting a novel similarity measure inspired by capsule networks .", "the approach measures redundancy between a pair of sentences based on surface form and semantic information .", "we show that our dpp system with improved similarity measure performs competitively , outperforming strong summarization baselines on benchmark datasets .", "our findings are particularly meaningful for summarizing documents created by multiple authors containing redundant yet lexically diverse expressions ."], "events": [{"event_type": "RWF", "arguments": [{"text": "multi - document summarization", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["multi", "-", "document", "summarization"], "offsets": [5, 6, 7, 8]}, {"text": "in source descriptions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "source", "descriptions"], "offsets": [12, 13, 14]}, {"text": "excessive", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["excessive"], "offsets": [10]}, {"text": "redundancy", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["redundancy"], "offsets": [11]}, {"text": "shortage of training data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["shortage", "of", "training", "data"], "offsets": [18, 19, 20, 21]}, {"text": "looming", 
"nugget_type": "DEG", "argument_type": "Extent", "tokens": ["looming"], "offsets": [17]}], "trigger": {"text": "include", "tokens": ["include"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "encoder - decoder models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["encoder", "-", "decoder", "models"], "offsets": [26, 27, 28, 29]}], "trigger": {"text": "prevent", "tokens": ["prevent"], "offsets": [25]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "extractive multi - document summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["extractive", "multi", "-", "document", "summarization"], "offsets": [68, 69, 70, 71, 72]}, {"text": "strengthen", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["strengthen"], "offsets": [61]}, {"text": "similarity measure", "nugget_type": "APP", "argument_type": "Content", "tokens": ["similarity", "measure"], "offsets": [77, 78]}], "trigger": {"text": "presenting", "tokens": ["presenting"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "dpp - based method", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["dpp", "-", "based", "method"], "offsets": [63, 64, 65, 66]}], "trigger": {"text": "strengthen", "tokens": ["strengthen"], "offsets": [61]}}, {"event_type": "WKS", "arguments": [{"text": "between a pair of sentences based on surface form and semantic information", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "a", "pair", "of", "sentences", "based", "on", "surface", "form", "and", "semantic", "information"], "offsets": [88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99]}, {"text": "redundancy", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["redundancy"], "offsets": [87]}], "trigger": {"text": "measures", "tokens": ["measures"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [101]}, {"text": "performs", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["performs"], "offsets": [111]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [102]}}, {"event_type": "FAC", "arguments": [{"text": "competitively", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["competitively"], "offsets": [112]}, {"text": "our dpp system", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["our", "determinantal", "point", "processes", "system"], "offsets": [104, 42, 43, 44, 106]}, {"text": "with improved similarity measure", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "improved", "similarity", "measure"], "offsets": [107, 108, 109, 110]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [111]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [101]}, {"text": "outperforming", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforming"], "offsets": [114]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [102]}}, {"event_type": "CMP", "arguments": [{"text": "with improved similarity measure", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "improved", "similarity", "measure"], "offsets": [107, 108, 109, 110]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [114]}, {"text": "strong summarization baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "summarization", "baselines"], "offsets": [115, 116, 117]}, {"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["benchmark", "datasets"], "offsets": [119, 120]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [114]}}], "document": ["the", "most", "important", 
"obstacles", "facing", "multi", "-", "document", "summarization", "include", "excessive", "redundancy", "in", "source", "descriptions", "and", "the", "looming", "shortage", "of", "training", "data", ".", "these", "obstacles", "prevent", "encoder", "-", "decoder", "models", "from", "being", "used", "directly", ",", "but", "optimization", "-", "based", "methods", "such", "as", "determinantal", "point", "processes", "(", "dpps", ")", "are", "known", "to", "handle", "them", "well", ".", "in", "this", "paper", "we", "seek", "to", "strengthen", "a", "dpp", "-", "based", "method", "for", "extractive", "multi", "-", "document", "summarization", "by", "presenting", "a", "novel", "similarity", "measure", "inspired", "by", "capsule", "networks", ".", "the", "approach", "measures", "redundancy", "between", "a", "pair", "of", "sentences", "based", "on", "surface", "form", "and", "semantic", "information", ".", "we", "show", "that", "our", "dpp", "system", "with", "improved", "similarity", "measure", "performs", "competitively", ",", "outperforming", "strong", "summarization", "baselines", "on", "benchmark", "datasets", ".", "our", "findings", "are", "particularly", "meaningful", "for", "summarizing", "documents", "created", "by", "multiple", "authors", "containing", "redundant", "yet", "lexically", "diverse", "expressions", "."]}, {"venue": "ACL", "title": "One Country, 700+ Languages: NLP Challenges for Underrepresented Languages and Dialects in Indonesia", "abstract": "NLP research is impeded by a lack of resources and awareness of the challenges presented by underrepresented languages and dialects. Focusing on the languages spoken in Indonesia, the second most linguistically diverse and the fourth most populous nation of the world, we provide an overview of the current state of NLP research for Indonesia\u2019s 700+ languages. We highlight challenges in Indonesian NLP and how these affect the performance of current NLP systems. 
Finally, we provide general recommendations to help develop NLP technology not only for languages of Indonesia but also other underrepresented languages.", "doc_id": "319de5433714c1707f3dfbdfeb5e7fa5", "publication_year": 2022, "sentences": ["nlp research is impeded by a lack of resources and awareness of the challenges presented by underrepresented languages and dialects .", "focusing on the languages spoken in indonesia , the second most linguistically diverse and the fourth most populous nation of the world , we provide an overview of the current state of nlp research for indonesia \u2019 s 700 + languages .", "we highlight challenges in indonesian nlp and how these affect the performance of current nlp systems .", "finally , we provide general recommendations to help develop nlp technology not only for languages of indonesia but also other underrepresented languages ."], "events": [{"event_type": "RWF", "arguments": [{"text": "nlp research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "research"], "offsets": [0, 1]}, {"text": "impeded", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["impeded"], "offsets": [3]}, {"text": "lack of resources", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", "of", "resources"], "offsets": [6, 7, 8]}], "trigger": {"text": "impeded", "tokens": ["impeded"], "offsets": [3]}}, {"event_type": "WKS", "arguments": [{"text": "indonesia \u2019 s 700 + languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["indonesia", "\u2019", "s", "700", "+", "languages"], "offsets": [56, 57, 58, 59, 60, 61]}, {"text": "overview of the current state of nlp research", "nugget_type": "APP", "argument_type": "Content", "tokens": ["overview", "of", "the", "current", "state", "of", "nlp", "research"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [44]}], "trigger": {"text": "provide", "tokens": 
["provide"], "offsets": [45]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [63]}, {"text": "indonesian nlp", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["indonesian", "nlp"], "offsets": [67, 68]}, {"text": "how these affect the performance of current nlp systems", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["how", "these", "affect", "the", "performance", "of", "current", "nlp", "systems"], "offsets": [70, 71, 72, 73, 74, 75, 76, 77, 78]}, {"text": "challenges", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["challenges"], "offsets": [65]}], "trigger": {"text": "highlight", "tokens": ["highlight"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [82]}, {"text": "general recommendations", "nugget_type": "APP", "argument_type": "Content", "tokens": ["general", "recommendations"], "offsets": [84, 85]}, {"text": "develop", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["develop"], "offsets": [88]}, {"text": "not only for languages of indonesia but also other underrepresented languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["not", "only", "for", "languages", "of", "indonesia", "but", "also", "other", "underrepresented", "languages"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "nlp technology", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["nlp", "technology"], "offsets": [89, 90]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [88]}}], "document": ["nlp", "research", "is", "impeded", "by", "a", "lack", "of", "resources", "and", "awareness", "of", "the", "challenges", "presented", "by", "underrepresented", "languages", "and", 
"dialects", ".", "focusing", "on", "the", "languages", "spoken", "in", "indonesia", ",", "the", "second", "most", "linguistically", "diverse", "and", "the", "fourth", "most", "populous", "nation", "of", "the", "world", ",", "we", "provide", "an", "overview", "of", "the", "current", "state", "of", "nlp", "research", "for", "indonesia", "\u2019", "s", "700", "+", "languages", ".", "we", "highlight", "challenges", "in", "indonesian", "nlp", "and", "how", "these", "affect", "the", "performance", "of", "current", "nlp", "systems", ".", "finally", ",", "we", "provide", "general", "recommendations", "to", "help", "develop", "nlp", "technology", "not", "only", "for", "languages", "of", "indonesia", "but", "also", "other", "underrepresented", "languages", "."]}, {"venue": "ACL", "title": "Towards Open Domain Event Trigger Identification using Adversarial Domain Adaptation", "abstract": "We tackle the task of building supervised event trigger identification models which can generalize better across domains. Our work leverages the adversarial domain adaptation (ADA) framework to introduce domain-invariance. ADA uses adversarial training to construct representations that are predictive for trigger identification, but not predictive of the example\u2019s domain. It requires no labeled data from the target domain, making it completely unsupervised. Experiments with two domains (English literature and news) show that ADA leads to an average F1 score improvement of 3.9 on out-of-domain data. Our best performing model (BERT-A) reaches 44-49 F1 across both domains, using no labeled target data. 
Preliminary experiments reveal that finetuning on 1% labeled data, followed by self-training leads to substantial improvement, reaching 51.5 and 67.2 F1 on literature and news respectively.", "doc_id": "5b7bf44f563b0c21a3d31d9348f4f931", "publication_year": 2020, "sentences": ["we tackle the task of building supervised event trigger identification models which can generalize better across domains .", "our work leverages the adversarial domain adaptation ( ada ) framework to introduce domain - invariance .", "ada uses adversarial training to construct representations that are predictive for trigger identification , but not predictive of the example \u2019 s domain .", "it requires no labeled data from the target domain , making it completely unsupervised .", "experiments with two domains ( english literature and news ) show that ada leads to an average f1 score improvement of 3 . 9 on out - of - domain data .", "our best performing model ( bert - a ) reaches 44 - 49 f1 across both domains , using no labeled target data .", "preliminary experiments reveal that finetuning on 1 % labeled data , followed by self - training leads to substantial improvement , reaching 51 . 5 and 67 . 
2 f1 on literature and news respectively ."], "events": [{"event_type": "MDS", "arguments": [{"text": "supervised event trigger identification models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["supervised", "event", "trigger", "identification", "models"], "offsets": [6, 7, 8, 9, 10]}, {"text": "generalize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generalize"], "offsets": [13]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [5]}}, {"event_type": "PUR", "arguments": [{"text": "better across domains", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["better", "across", "domains"], "offsets": [14, 15, 16]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [13]}}, {"event_type": "MDS", "arguments": [{"text": "adversarial domain adaptation ( ada ) framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["adversarial", "domain", "adaptation", "framework"], "offsets": [22, 23, 24, 28]}, {"text": "introduce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["introduce"], "offsets": [30]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [20]}}, {"event_type": "PUR", "arguments": [{"text": "domain - invariance", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["domain", "-", "invariance"], "offsets": [31, 32, 33]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [30]}}, {"event_type": "MDS", "arguments": [{"text": "construct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["construct"], "offsets": [40]}, {"text": "adversarial training", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["adversarial", "training"], "offsets": [37, 38]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [36]}}, {"event_type": "PUR", "arguments": [{"text": "representations that are predictive for trigger identification", "nugget_type": "FEA", "argument_type": "Aim", 
"tokens": ["representations", "that", "are", "predictive", "for", "trigger", "identification"], "offsets": [41, 42, 43, 44, 45, 46, 47]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [40]}}, {"event_type": "FIN", "arguments": [{"text": "leads", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["leads"], "offsets": [87]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [84]}}, {"event_type": "CMP", "arguments": [{"text": "average f1 score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["average", "f1", "score"], "offsets": [90, 91, 92]}, {"text": "3 . 9", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["3", ".", "9"], "offsets": [95, 96, 97]}, {"text": "on out - of - domain data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "out", "-", "of", "-", "domain", "data"], "offsets": [98, 99, 100, 101, 102, 103, 104]}, {"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [93]}, {"text": "adversarial domain adaptation ( ada ) framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["adversarial", "domain", "adaptation", "framework"], "offsets": [22, 23, 24, 28]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [87]}}, {"event_type": "FAC", "arguments": [{"text": "using no labeled target data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "no", "labeled", "target", "data"], "offsets": [124, 125, 126, 127, 128]}, {"text": "44", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["44"], "offsets": [116]}, {"text": "english literature", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["english", "literature"], "offsets": [79, 80]}, {"text": "bert - a", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bert", "-", "a"], "offsets": [111, 112, 113]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], 
"offsets": [119]}], "trigger": {"text": "reaches", "tokens": ["reaches"], "offsets": [115]}}, {"event_type": "FAC", "arguments": [{"text": "bert - a", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bert", "-", "a"], "offsets": [111, 112, 113]}, {"text": "news", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["news"], "offsets": [82]}, {"text": "49", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["49"], "offsets": [118]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [119]}, {"text": "using no labeled target data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "no", "labeled", "target", "data"], "offsets": [124, 125, 126, 127, 128]}], "trigger": {"text": "reaches", "tokens": ["reaches"], "offsets": [115]}}, {"event_type": "FAC", "arguments": [{"text": "finetuning on 1 % labeled data", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["finetuning", "on", "1", "%", "labeled", "data"], "offsets": [134, 135, 136, 137, 138, 139]}, {"text": "followed by self - training leads to substantial improvement", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["followed", "by", "self", "-", "training", "leads", "to", "substantial", "improvement"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149]}, {"text": "51 . 
5", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["51", ".", "5"], "offsets": [152, 153, 154]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [159]}, {"text": "on literature", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on"], "offsets": [160]}], "trigger": {"text": "reaching", "tokens": ["reaching"], "offsets": [151]}}, {"event_type": "FAC", "arguments": [{"text": "finetuning on 1 % labeled data", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["finetuning", "on", "1", "%", "labeled", "data"], "offsets": [134, 135, 136, 137, 138, 139]}, {"text": "followed by self - training leads to substantial improvement", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["followed", "by", "self", "-", "training", "leads", "to", "substantial", "improvement"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149]}, {"text": "67 . 2", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["67", ".", "2"], "offsets": [156, 157, 158]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [159]}, {"text": "on literature and news", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "news"], "offsets": [160, 163]}], "trigger": {"text": "reaching", "tokens": ["reaching"], "offsets": [151]}}], "document": ["we", "tackle", "the", "task", "of", "building", "supervised", "event", "trigger", "identification", "models", "which", "can", "generalize", "better", "across", "domains", ".", "our", "work", "leverages", "the", "adversarial", "domain", "adaptation", "(", "ada", ")", "framework", "to", "introduce", "domain", "-", "invariance", ".", "ada", "uses", "adversarial", "training", "to", "construct", "representations", "that", "are", "predictive", "for", "trigger", "identification", ",", "but", "not", "predictive", "of", "the", "example", "\u2019", "s", "domain", ".", "it", "requires", "no", "labeled", "data", "from", "the", 
"target", "domain", ",", "making", "it", "completely", "unsupervised", ".", "experiments", "with", "two", "domains", "(", "english", "literature", "and", "news", ")", "show", "that", "ada", "leads", "to", "an", "average", "f1", "score", "improvement", "of", "3", ".", "9", "on", "out", "-", "of", "-", "domain", "data", ".", "our", "best", "performing", "model", "(", "bert", "-", "a", ")", "reaches", "44", "-", "49", "f1", "across", "both", "domains", ",", "using", "no", "labeled", "target", "data", ".", "preliminary", "experiments", "reveal", "that", "finetuning", "on", "1", "%", "labeled", "data", ",", "followed", "by", "self", "-", "training", "leads", "to", "substantial", "improvement", ",", "reaching", "51", ".", "5", "and", "67", ".", "2", "f1", "on", "literature", "and", "news", "respectively", "."]}, {"venue": "ACL", "title": "Neural Decipherment via Minimum-Cost Flow: From Ugaritic to Linear B", "abstract": "In this paper we propose a novel neural approach for automatic decipherment of lost languages. To compensate for the lack of strong supervision signal, our model design is informed by patterns in language change documented in historical linguistics. The model utilizes an expressive sequence-to-sequence model to capture character-level correspondences between cognates. To effectively train the model in unsupervised manner, we innovate the training procedure by formalizing it as a minimum-cost flow problem. When applied to decipherment of Ugaritic, we achieve 5% absolute improvement over state-of-the-art results. 
We also report first automatic results in deciphering Linear B, a syllabic language related to ancient Greek, where our model correctly translates 67.3% of cognates.", "doc_id": "02b59a9d768e3065720822c68dcd185e", "publication_year": 2019, "sentences": ["in this paper we propose a novel neural approach for automatic decipherment of lost languages .", "to compensate for the lack of strong supervision signal , our model design is informed by patterns in language change documented in historical linguistics .", "the model utilizes an expressive sequence - to - sequence model to capture character - level correspondences between cognates .", "to effectively train the model in unsupervised manner , we innovate the training procedure by formalizing it as a minimum - cost flow problem .", "when applied to decipherment of ugaritic , we achieve 5 % absolute improvement over state - of - the - art results .", "we also report first automatic results in deciphering linear b , a syllabic language related to ancient greek , where our model correctly translates 67 . 
3 % of cognates ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [3]}, {"text": "novel neural approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["novel", "neural", "approach"], "offsets": [6, 7, 8]}, {"text": "automatic decipherment of lost languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "decipherment", "of", "lost", "languages"], "offsets": [10, 11, 12, 13, 14]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [4]}}, {"event_type": "MDS", "arguments": [{"text": "compensate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["compensate"], "offsets": [17]}, {"text": "patterns", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["patterns"], "offsets": [32]}, {"text": "in language change documented in historical linguistics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "language", "change", "documented", "in", "historical", "linguistics"], "offsets": [33, 34, 35, 36, 37, 38, 39]}], "trigger": {"text": "informed", "tokens": ["informed"], "offsets": [30]}}, {"event_type": "PUR", "arguments": [{"text": "lack of strong supervision signal", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["lack", "of", "strong", "supervision", "signal"], "offsets": [20, 21, 22, 23, 24]}], "trigger": {"text": "compensate", "tokens": ["compensate"], "offsets": [17]}}, {"event_type": "MDS", "arguments": [{"text": "expressive sequence - to - sequence model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["expressive", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [45, 46, 47, 48, 49, 50, 51]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [53]}], "trigger": {"text": "utilizes", "tokens": ["utilizes"], "offsets": [43]}}, {"event_type": "PUR", "arguments": [{"text": 
"character - level correspondences between cognates", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["character", "-", "level", "correspondences", "between", "cognates"], "offsets": [54, 55, 56, 57, 58, 59]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [53]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [70]}, {"text": "training procedure", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["training", "procedure"], "offsets": [73, 74]}, {"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [63]}], "trigger": {"text": "innovate", "tokens": ["innovate"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["model"], "offsets": [65]}, {"text": "in unsupervised manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "unsupervised", "manner"], "offsets": [66, 67, 68]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [63]}}, {"event_type": "MDS", "arguments": [{"text": "training procedure", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["training", "procedure"], "offsets": [73, 74]}, {"text": "minimum - cost flow problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["minimum", "-", "cost", "flow", "problem"], "offsets": [80, 81, 82, 83, 84]}], "trigger": {"text": "formalizing", "tokens": ["formalizing"], "offsets": [76]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art results", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [100, 101, 102, 103, 104, 105, 106, 107]}, {"text": "5 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["5", "%"], "offsets": [95, 96]}, {"text": "absolute improvement", "nugget_type": "STR", "argument_type": 
"Result", "tokens": ["absolute", "improvement"], "offsets": [97, 98]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [94]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [109]}, {"text": "in deciphering linear b", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "deciphering", "linear", "b"], "offsets": [115, 116, 117, 118]}, {"text": "first automatic results", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["first", "automatic", "results"], "offsets": [112, 113, 114]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": [111]}}, {"event_type": "FAC", "arguments": [{"text": "neural approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "approach"], "offsets": [7, 8]}, {"text": "67 . 3 % of cognates", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["67", ".", "3", "%", "of", "cognates"], "offsets": [133, 134, 135, 136, 137, 138]}], "trigger": {"text": "correctly translates", "tokens": ["correctly", "translates"], "offsets": [131, 132]}}], "document": ["in", "this", "paper", "we", "propose", "a", "novel", "neural", "approach", "for", "automatic", "decipherment", "of", "lost", "languages", ".", "to", "compensate", "for", "the", "lack", "of", "strong", "supervision", "signal", ",", "our", "model", "design", "is", "informed", "by", "patterns", "in", "language", "change", "documented", "in", "historical", "linguistics", ".", "the", "model", "utilizes", "an", "expressive", "sequence", "-", "to", "-", "sequence", "model", "to", "capture", "character", "-", "level", "correspondences", "between", "cognates", ".", "to", "effectively", "train", "the", "model", "in", "unsupervised", "manner", ",", "we", "innovate", "the", "training", "procedure", "by", "formalizing", "it", "as", "a", "minimum", "-", "cost", "flow", "problem", ".", "when", "applied", "to", "decipherment", "of", "ugaritic", 
",", "we", "achieve", "5", "%", "absolute", "improvement", "over", "state", "-", "of", "-", "the", "-", "art", "results", ".", "we", "also", "report", "first", "automatic", "results", "in", "deciphering", "linear", "b", ",", "a", "syllabic", "language", "related", "to", "ancient", "greek", ",", "where", "our", "model", "correctly", "translates", "67", ".", "3", "%", "of", "cognates", "."]}, {"venue": "ACL", "title": "Understanding Gender Bias in Knowledge Base Embeddings", "abstract": "Knowledge base (KB) embeddings have been shown to contain gender biases. In this paper, we study two questions regarding these biases: how to quantify them, and how to trace their origins in KB? Specifically, first, we develop two novel bias measures respectively for a group of person entities and an individual person entity. Evidence of their validity is observed by comparison with real-world census data. Second, we use the influence function to inspect the contribution of each triple in KB to the overall group bias. 
To exemplify the potential applications of our study, we also present two strategies (by adding and removing KB triples) to mitigate gender biases in KB embeddings.", "doc_id": "5b9d6e34922593eace57bb6beb219c7e", "publication_year": 2022, "sentences": ["knowledge base ( kb ) embeddings have been shown to contain gender biases .", "in this paper , we study two questions regarding these biases : how to quantify them , and how to trace their origins in kb ?", "specifically , first , we develop two novel bias measures respectively for a group of person entities and an individual person entity .", "evidence of their validity is observed by comparison with real - world census data .", "second , we use the influence function to inspect the contribution of each triple in kb to the overall group bias .", "to exemplify the potential applications of our study , we also present two strategies ( by adding and removing kb triples ) to mitigate gender biases in kb embeddings ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [18]}, {"text": "quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quantify"], "offsets": [28]}, {"text": "trace", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["trace"], "offsets": [34]}, {"text": "questions regarding these biases", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["questions", "regarding", "gender", "biases"], "offsets": [21, 22, 11, 12]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [19]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [44]}, {"text": "two novel bias measures", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "novel", "bias", "measures"], "offsets": [46, 47, 48, 49]}, {"text": "group of person entities", "nugget_type": "FEA", "argument_type": "Target", 
"tokens": ["group", "of", "person", "entities"], "offsets": [53, 54, 55, 56]}, {"text": "individual person entity", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["individual", "person", "entity"], "offsets": [59, 60, 61]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [45]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}, {"text": "two strategies", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "strategies"], "offsets": [112, 113]}, {"text": "exemplify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["exemplify"], "offsets": [101]}, {"text": "mitigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mitigate"], "offsets": [123]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [111]}}, {"event_type": "PUR", "arguments": [{"text": "potential applications of our study", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["potential", "applications", "of", "our", "study"], "offsets": [103, 104, 105, 106, 107]}], "trigger": {"text": "exemplify", "tokens": ["exemplify"], "offsets": [101]}}, {"event_type": "RWF", "arguments": [{"text": "knowledge base ( kb ) embeddings", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["knowledge", "base", "embeddings"], "offsets": [0, 1, 5]}, {"text": "gender biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["gender", "biases"], "offsets": [11, 12]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [10]}}, {"event_type": "WKS", "arguments": [{"text": "real - world census data", "nugget_type": "DST", "argument_type": "Content", "tokens": ["real", "-", "world", "census", "data"], "offsets": [72, 73, 74, 75, 76]}, {"text": "observed", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["observed"], "offsets": [68]}], "trigger": {"text": "comparison", "tokens": ["comparison"], "offsets": 
[70]}}, {"event_type": "PUR", "arguments": [{"text": "evidence of their validity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["evidence", "of", "their", "validity"], "offsets": [63, 64, 65, 66]}], "trigger": {"text": "observed", "tokens": ["observed"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "influence function", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["influence", "function"], "offsets": [83, 84]}, {"text": "overall group bias", "nugget_type": "WEA", "argument_type": "Target", "tokens": ["overall", "group", "bias"], "offsets": [96, 97, 98]}, {"text": "contribution of each triple", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contribution", "of", "each", "triple"], "offsets": [88, 89, 90, 91]}], "trigger": {"text": "inspect", "tokens": ["inspect"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "gender biases", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["gender", "biases"], "offsets": [124, 125]}, {"text": "in kb embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "knowledge", "base", "embeddings"], "offsets": [126, 0, 1, 128]}], "trigger": {"text": "mitigate", "tokens": ["mitigate"], "offsets": [123]}}, {"event_type": "PUR", "arguments": [{"text": "gender biases", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["gender", "biases"], "offsets": [11, 12]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [28]}}, {"event_type": "PUR", "arguments": [{"text": "their origins", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["gender", "biases", ".", "origins"], "offsets": [11, 12, 13, 36]}], "trigger": {"text": "trace", "tokens": ["trace"], "offsets": [34]}}], "document": ["knowledge", "base", "(", "kb", ")", "embeddings", "have", "been", "shown", "to", "contain", "gender", "biases", ".", "in", "this", "paper", ",", "we", "study", "two", "questions", "regarding", "these", "biases", ":", 
"how", "to", "quantify", "them", ",", "and", "how", "to", "trace", "their", "origins", "in", "kb", "?", "specifically", ",", "first", ",", "we", "develop", "two", "novel", "bias", "measures", "respectively", "for", "a", "group", "of", "person", "entities", "and", "an", "individual", "person", "entity", ".", "evidence", "of", "their", "validity", "is", "observed", "by", "comparison", "with", "real", "-", "world", "census", "data", ".", "second", ",", "we", "use", "the", "influence", "function", "to", "inspect", "the", "contribution", "of", "each", "triple", "in", "kb", "to", "the", "overall", "group", "bias", ".", "to", "exemplify", "the", "potential", "applications", "of", "our", "study", ",", "we", "also", "present", "two", "strategies", "(", "by", "adding", "and", "removing", "kb", "triples", ")", "to", "mitigate", "gender", "biases", "in", "kb", "embeddings", "."]}, {"venue": "ACL", "title": "One2Set: Generating Diverse Keyphrases as a Set", "abstract": "Recently, the sequence-to-sequence models have made remarkable progress on the task of keyphrase generation (KG) by concatenating multiple keyphrases in a predefined order as a target sequence during training. However, the keyphrases are inherently an unordered set rather than an ordered sequence. Imposing a predefined order will introduce wrong bias during training, which can highly penalize shifts in the order between keyphrases. In this work, we propose a new training paradigm One2Set without predefining an order to concatenate the keyphrases. To fit this paradigm, we propose a novel model that utilizes a fixed set of learned control codes as conditions to generate a set of keyphrases in parallel. To solve the problem that there is no correspondence between each prediction and target during training, we propose a K-step label assignment mechanism via bipartite matching, which greatly increases the diversity and reduces the repetition rate of generated keyphrases. 
The experimental results on multiple benchmarks demonstrate that our approach significantly outperforms the state-of-the-art methods.", "doc_id": "d5e67029f5019d4a7558a541e7814994", "publication_year": 2021, "sentences": ["recently , the sequence - to - sequence models have made remarkable progress on the task of keyphrase generation ( kg ) by concatenating multiple keyphrases in a predefined order as a target sequence during training .", "however , the keyphrases are inherently an unordered set rather than an ordered sequence .", "imposing a predefined order will introduce wrong bias during training , which can highly penalize shifts in the order between keyphrases .", "in this work , we propose a new training paradigm one2set without predefining an order to concatenate the keyphrases .", "to fit this paradigm , we propose a novel model that utilizes a fixed set of learned control codes as conditions to generate a set of keyphrases in parallel .", "to solve the problem that there is no correspondence between each prediction and target during training , we propose a k - step label assignment mechanism via bipartite matching , which greatly increases the diversity and reduces the repetition rate of generated keyphrases .", "the experimental results on multiple benchmarks demonstrate that our approach significantly outperforms the state - of - the - art methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sequence - to - sequence models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["sequence", "-", "to", "-", "sequence", "models"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "made", "tokens": ["made"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "wrong bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["wrong", "bias"], "offsets": [58, 59]}, {"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [60, 61]}], "trigger": 
{"text": "introduce", "tokens": ["introduce"], "offsets": [57]}}, {"event_type": "RWF", "arguments": [{"text": "highly penalize", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["highly", "penalize"], "offsets": [65, 66]}, {"text": "shifts", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["shifts"], "offsets": [67]}], "trigger": {"text": "highly penalize", "tokens": ["highly", "penalize"], "offsets": [65, 66]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "training paradigm one2set", "nugget_type": "APP", "argument_type": "Content", "tokens": ["training", "paradigm", "one2set"], "offsets": [82, 83, 84]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [79]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [99]}, {"text": "model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model"], "offsets": [103]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [100]}}, {"event_type": "MDS", "arguments": [{"text": "conditions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conditions"], "offsets": [114]}, {"text": "fixed set of learned control codes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["fixed", "set", "of", "learned", "control", "codes"], "offsets": [107, 108, 109, 110, 111, 112]}, {"text": "set of keyphrases", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["set", "of", "keyphrases"], "offsets": [118, 119, 120]}, {"text": "in parallel", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "parallel"], "offsets": [121, 122]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [116]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, 
{"text": "k - step label assignment mechanism via bipartite matching", "nugget_type": "APP", "argument_type": "Content", "tokens": ["k", "-", "step", "label", "assignment", "mechanism", "via", "bipartite", "matching"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152]}, {"text": "greatly increases", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["greatly", "increases"], "offsets": [155, 156]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "diversity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["diversity"], "offsets": [158]}], "trigger": {"text": "greatly increases", "tokens": ["greatly", "increases"], "offsets": [155, 156]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, {"text": "k - step label assignment mechanism via bipartite matching", "nugget_type": "APP", "argument_type": "Content", "tokens": ["k", "-", "step", "label", "assignment", "mechanism", "via", "bipartite", "matching"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152]}, {"text": "reduces", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reduces"], "offsets": [160]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "repetition rate of generated keyphrases", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["repetition", "rate", "of", "generated", "keyphrases"], "offsets": [162, 163, 164, 165, 166]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [160]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [179]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [174]}}, {"event_type": "CMP", "arguments": [{"text": "training paradigm one2set", "nugget_type": 
"APP", "argument_type": "Arg1", "tokens": ["training", "paradigm", "one2set"], "offsets": [82, 83, 84]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [178]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [179]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [179]}}], "document": ["recently", ",", "the", "sequence", "-", "to", "-", "sequence", "models", "have", "made", "remarkable", "progress", "on", "the", "task", "of", "keyphrase", "generation", "(", "kg", ")", "by", "concatenating", "multiple", "keyphrases", "in", "a", "predefined", "order", "as", "a", "target", "sequence", "during", "training", ".", "however", ",", "the", "keyphrases", "are", "inherently", "an", "unordered", "set", "rather", "than", "an", "ordered", "sequence", ".", "imposing", "a", "predefined", "order", "will", "introduce", "wrong", "bias", "during", "training", ",", "which", "can", "highly", "penalize", "shifts", "in", "the", "order", "between", "keyphrases", ".", "in", "this", "work", ",", "we", "propose", "a", "new", "training", "paradigm", "one2set", "without", "predefining", "an", "order", "to", "concatenate", "the", "keyphrases", ".", "to", "fit", "this", "paradigm", ",", "we", "propose", "a", "novel", "model", "that", "utilizes", "a", "fixed", "set", "of", "learned", "control", "codes", "as", "conditions", "to", "generate", "a", "set", "of", "keyphrases", "in", "parallel", ".", "to", "solve", "the", "problem", "that", "there", "is", "no", "correspondence", "between", "each", "prediction", "and", "target", "during", "training", ",", "we", "propose", "a", "k", "-", "step", "label", "assignment", "mechanism", "via", "bipartite", "matching", ",", "which", "greatly", "increases", "the", "diversity", "and", "reduces", "the", "repetition", "rate", "of", "generated", "keyphrases", ".", "the", "experimental", "results", "on", 
"multiple", "benchmarks", "demonstrate", "that", "our", "approach", "significantly", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "methods", "."]}, {"venue": "ACL", "title": "Generating Relevant and Coherent Dialogue Responses using Self-Separated Conditional Variational AutoEncoders", "abstract": "Conditional Variational AutoEncoder (CVAE) effectively increases the diversity and informativeness of responses in open-ended dialogue generation tasks through enriching the context vector with sampled latent variables. However, due to the inherent one-to-many and many-to-one phenomena in human dialogues, the sampled latent variables may not correctly reflect the contexts\u2019 semantics, leading to irrelevant and incoherent generated responses. To resolve this problem, we propose Self-separated Conditional Variational AutoEncoder (abbreviated as SepaCVAE) that introduces group information to regularize the latent variables, which enhances CVAE by improving the responses\u2019 relevance and coherence while maintaining their diversity and informativeness. SepaCVAE actively divides the input data into groups, and then widens the absolute difference between data pairs from distinct groups, while narrowing the relative distance between data pairs in the same group. 
Empirical results from automatic evaluation and detailed analysis demonstrate that SepaCVAE can significantly boost responses in well-established open-domain dialogue datasets.", "doc_id": "c70c53f82110d34dca671fec233ffd81", "publication_year": 2021, "sentences": ["conditional variational autoencoder ( cvae ) effectively increases the diversity and informativeness of responses in open - ended dialogue generation tasks through enriching the context vector with sampled latent variables .", "however , due to the inherent one - to - many and many - to - one phenomena in human dialogues , the sampled latent variables may not correctly reflect the contexts \u2019 semantics , leading to irrelevant and incoherent generated responses .", "to resolve this problem , we propose self - separated conditional variational autoencoder ( abbreviated as sepacvae ) that introduces group information to regularize the latent variables , which enhances cvae by improving the responses \u2019 relevance and coherence while maintaining their diversity and informativeness .", "sepacvae actively divides the input data into groups , and then widens the absolute difference between data pairs from distinct groups , while narrowing the relative distance between data pairs in the same group .", "empirical results from automatic evaluation and detailed analysis demonstrate that sepacvae can significantly boost responses in well - established open - domain dialogue datasets ."], "events": [{"event_type": "RWS", "arguments": [{"text": "conditional variational autoencoder", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["conditional", "variational", "autoencoder"], "offsets": [0, 1, 2]}, {"text": "context vector", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["context", "vector"], "offsets": [24, 25]}, {"text": "sampled latent variables", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["sampled", "latent", "variables"], "offsets": [27, 28, 29]}, 
{"text": "increases", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["increases"], "offsets": [7]}, {"text": "in open - ended dialogue generation tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "open", "-", "ended", "dialogue", "generation", "tasks"], "offsets": [14, 15, 16, 17, 18, 19, 20]}], "trigger": {"text": "enriching", "tokens": ["enriching"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "diversity and informativeness of responses", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["diversity", "and", "informativeness", "of", "responses"], "offsets": [9, 10, 11, 12, 13]}], "trigger": {"text": "increases", "tokens": ["increases"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "sampled latent variables", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["sampled", "latent", "variables"], "offsets": [54, 55, 56]}, {"text": "not correctly reflect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "correctly", "reflect"], "offsets": [58, 59, 60]}, {"text": "contexts \u2019 semantics", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["contexts", "\u2019", "semantics"], "offsets": [62, 63, 64]}], "trigger": {"text": "not correctly reflect", "tokens": ["not", "correctly", "reflect"], "offsets": [58, 59, 60]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [79]}, {"text": "self - separated conditional variational autoencoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["self", "-", "separated", "conditional", "variational", "autoencoder"], "offsets": [81, 82, 83, 84, 85, 86]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [80]}}, {"event_type": "MDS", "arguments": [{"text": "group information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["group", "information"], "offsets": [94, 95]}, {"text": "latent 
variables", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["latent", "variables"], "offsets": [99, 100]}], "trigger": {"text": "regularize", "tokens": ["regularize"], "offsets": [97]}}, {"event_type": "MDS", "arguments": [{"text": "enhances", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhances"], "offsets": [103]}, {"text": "responses \u2019 relevance and coherence", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["responses", "\u2019", "relevance", "and", "coherence"], "offsets": [108, 109, 110, 111, 112]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "conditional variational autoencoder", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["conditional", "variational", "autoencoder"], "offsets": [0, 1, 2]}], "trigger": {"text": "enhances", "tokens": ["enhances"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "input data", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input", "data"], "offsets": [124, 125]}, {"text": "groups", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["groups"], "offsets": [127]}, {"text": "sepacvae", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["sepacvae"], "offsets": [120]}], "trigger": {"text": "divides", "tokens": ["divides"], "offsets": [122]}}, {"event_type": "MDS", "arguments": [{"text": "absolute difference", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["absolute", "difference"], "offsets": [133, 134]}, {"text": "distinct groups", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["distinct", "groups"], "offsets": [139, 140]}], "trigger": {"text": "widens", "tokens": ["widens"], "offsets": [131]}}, {"event_type": "MDS", "arguments": [{"text": "relative distance", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relative", "distance"], "offsets": [145, 146]}, 
{"text": "same group", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["same", "group"], "offsets": [152, 153]}], "trigger": {"text": "narrowing", "tokens": ["narrowing"], "offsets": [143]}}, {"event_type": "FAC", "arguments": [{"text": "sepacvae", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["sepacvae"], "offsets": [165]}, {"text": "responses", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["responses"], "offsets": [169]}, {"text": "in well - established open - domain dialogue datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "well", "-", "established", "open", "-", "domain", "dialogue", "datasets"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177, 178]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [167]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [168]}}], "document": ["conditional", "variational", "autoencoder", "(", "cvae", ")", "effectively", "increases", "the", "diversity", "and", "informativeness", "of", "responses", "in", "open", "-", "ended", "dialogue", "generation", "tasks", "through", "enriching", "the", "context", "vector", "with", "sampled", "latent", "variables", ".", "however", ",", "due", "to", "the", "inherent", "one", "-", "to", "-", "many", "and", "many", "-", "to", "-", "one", "phenomena", "in", "human", "dialogues", ",", "the", "sampled", "latent", "variables", "may", "not", "correctly", "reflect", "the", "contexts", "\u2019", "semantics", ",", "leading", "to", "irrelevant", "and", "incoherent", "generated", "responses", ".", "to", "resolve", "this", "problem", ",", "we", "propose", "self", "-", "separated", "conditional", "variational", "autoencoder", "(", "abbreviated", "as", "sepacvae", ")", "that", "introduces", "group", "information", "to", "regularize", "the", "latent", "variables", ",", "which", "enhances", "cvae", "by", "improving", "the", "responses", "\u2019", 
"relevance", "and", "coherence", "while", "maintaining", "their", "diversity", "and", "informativeness", ".", "sepacvae", "actively", "divides", "the", "input", "data", "into", "groups", ",", "and", "then", "widens", "the", "absolute", "difference", "between", "data", "pairs", "from", "distinct", "groups", ",", "while", "narrowing", "the", "relative", "distance", "between", "data", "pairs", "in", "the", "same", "group", ".", "empirical", "results", "from", "automatic", "evaluation", "and", "detailed", "analysis", "demonstrate", "that", "sepacvae", "can", "significantly", "boost", "responses", "in", "well", "-", "established", "open", "-", "domain", "dialogue", "datasets", "."]}, {"venue": "ACL", "title": "BACO: A Background Knowledge- and Content-Based Framework for Citing Sentence Generation", "abstract": "In this paper, we focus on the problem of citing sentence generation, which entails generating a short text to capture the salient information in a cited paper and the connection between the citing and cited paper. We present BACO, a BAckground knowledge- and COntent-based framework for citing sentence generation, which considers two types of information: (1) background knowledge by leveraging structural information from a citation network; and (2) content, which represents in-depth information about what to cite and why to cite. First, a citation network is encoded to provide background knowledge. Second, we apply salience estimation to identify what to cite by estimating the importance of sentences in the cited paper. During the decoding stage, both types of information are combined to facilitate the text generation, and then we conduct a joint training for the generator and citation function classification to make the model aware of why to cite. 
Our experimental results show that our framework outperforms comparative baselines.", "doc_id": "940e9685fe566e46474d6dc99bedb7be", "publication_year": 2021, "sentences": ["in this paper , we focus on the problem of citing sentence generation , which entails generating a short text to capture the salient information in a cited paper and the connection between the citing and cited paper .", "we present baco , a background knowledge - and content - based framework for citing sentence generation , which considers two types of information : ( 1 ) background knowledge by leveraging structural information from a citation network ; and ( 2 ) content , which represents in - depth information about what to cite and why to cite .", "first , a citation network is encoded to provide background knowledge .", "second , we apply salience estimation to identify what to cite by estimating the importance of sentences in the cited paper .", "during the decoding stage , both types of information are combined to facilitate the text generation , and then we conduct a joint training for the generator and citation function classification to make the model aware of why to cite .", "our experimental results show that our framework outperforms comparative baselines ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [4]}, {"text": "problem of citing sentence generation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "citing", "sentence", "generation"], "offsets": [8, 9, 10, 11, 12]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [5]}}, {"event_type": "MDS", "arguments": [{"text": "short text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["short", "text"], "offsets": [18, 19]}, {"text": "salient information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["salient", "information"], "offsets": [23, 24]}, 
{"text": "in a cited paper", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "cited", "paper"], "offsets": [25, 26, 27, 28]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [21]}}, {"event_type": "WKS", "arguments": [{"text": "connection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["connection"], "offsets": [31]}, {"text": "between the citing and cited paper", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "citing", "and", "cited", "paper"], "offsets": [32, 33, 34, 35, 36, 37]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [21]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [39]}, {"text": "baco", "nugget_type": "APP", "argument_type": "Content", "tokens": ["baco"], "offsets": [41]}, {"text": "citing sentence generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["citing", "sentence", "generation"], "offsets": [53, 54, 55]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [40]}}, {"event_type": "WKS", "arguments": [{"text": "background knowledge", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["background", "knowledge"], "offsets": [67, 68]}, {"text": "content", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["content"], "offsets": [82]}], "trigger": {"text": "considers", "tokens": ["considers"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "structural information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["structural", "information"], "offsets": [71, 72]}, {"text": "citation network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["citation", "network"], "offsets": [75, 76]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [70]}}, {"event_type": "WKS", "arguments": [{"text": "citation network", "nugget_type": 
"APP", "argument_type": "Content", "tokens": ["citation", "network"], "offsets": [102, 103]}, {"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [107]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [105]}}, {"event_type": "PUR", "arguments": [{"text": "background knowledge", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["background", "knowledge"], "offsets": [108, 109]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [113]}, {"text": "salience estimation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["salience", "estimation"], "offsets": [115, 116]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "the importance of sentences", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["the", "importance", "of", "sentences"], "offsets": [124, 125, 126, 127]}, {"text": "in the cited paper", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "cited", "paper"], "offsets": [128, 129, 130, 131]}, {"text": "identify", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["identify"], "offsets": [118]}], "trigger": {"text": "estimating", "tokens": ["estimating"], "offsets": [123]}}, {"event_type": "MDS", "arguments": [{"text": "during the decoding stage", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "the", "decoding", "stage"], "offsets": [133, 134, 135, 136]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [145]}, {"text": "background knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["background", "knowledge"], "offsets": [67, 68]}, {"text": "content", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["content"], "offsets": [82]}], "trigger": {"text": "combined", "tokens": ["combined"], "offsets": [143]}}, {"event_type": "PUR", "arguments": [{"text": "text generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["text", "generation"], "offsets": [147, 148]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [145]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "joint training", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["joint", "training"], "offsets": [155, 156]}, {"text": "generator", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["generator"], "offsets": [159]}, {"text": "citation function classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["citation", "function", "classification"], "offsets": [161, 162, 163]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [153]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [181]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [177]}}, {"event_type": "CMP", "arguments": [{"text": "baco", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["baco"], "offsets": [41]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [181]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [181]}}], "document": ["in", "this", "paper", ",", "we", "focus", "on", "the", "problem", "of", "citing", "sentence", "generation", ",", "which", "entails", "generating", "a", "short", "text", "to", "capture", "the", "salient", "information", "in", "a", "cited", "paper", "and", "the", "connection", "between", "the", "citing", "and", "cited", "paper", ".", "we", "present", "baco", ",", "a", "background", 
"knowledge", "-", "and", "content", "-", "based", "framework", "for", "citing", "sentence", "generation", ",", "which", "considers", "two", "types", "of", "information", ":", "(", "1", ")", "background", "knowledge", "by", "leveraging", "structural", "information", "from", "a", "citation", "network", ";", "and", "(", "2", ")", "content", ",", "which", "represents", "in", "-", "depth", "information", "about", "what", "to", "cite", "and", "why", "to", "cite", ".", "first", ",", "a", "citation", "network", "is", "encoded", "to", "provide", "background", "knowledge", ".", "second", ",", "we", "apply", "salience", "estimation", "to", "identify", "what", "to", "cite", "by", "estimating", "the", "importance", "of", "sentences", "in", "the", "cited", "paper", ".", "during", "the", "decoding", "stage", ",", "both", "types", "of", "information", "are", "combined", "to", "facilitate", "the", "text", "generation", ",", "and", "then", "we", "conduct", "a", "joint", "training", "for", "the", "generator", "and", "citation", "function", "classification", "to", "make", "the", "model", "aware", "of", "why", "to", "cite", ".", "our", "experimental", "results", "show", "that", "our", "framework", "outperforms", "comparative", "baselines", "."]}, {"venue": "ACL", "title": "Word2Box: Capturing Set-Theoretic Semantics of Words using Box Embeddings", "abstract": "Learning representations of words in a continuous space is perhaps the most fundamental task in NLP, however words interact in ways much richer than vector dot product similarity can provide. Many relationships between words can be expressed set-theoretically, for example, adjective-noun compounds (eg. \u201cred cars\u201d\u2286\u201ccars\u201d) and homographs (eg. \u201ctongue\u201d\u2229\u201cbody\u201d should be similar to \u201cmouth\u201d, while \u201ctongue\u201d\u2229\u201clanguage\u201d should be similar to \u201cdialect\u201d) have natural set-theoretic interpretations. 
Box embeddings are a novel region-based representation which provide the capability to perform these set-theoretic operations. In this work, we provide a fuzzy-set interpretation of box embeddings, and learn box representations of words using a set-theoretic training objective. We demonstrate improved performance on various word similarity tasks, particularly on less common words, and perform a quantitative and qualitative analysis exploring the additional unique expressivity provided by Word2Box.", "doc_id": "d75b7cedfbebfb8d0b7445672ccbce00", "publication_year": 2022, "sentences": ["learning representations of words in a continuous space is perhaps the most fundamental task in nlp , however words interact in ways much richer than vector dot product similarity can provide .", "many relationships between words can be expressed set - theoretically , for example , adjective - noun compounds ( eg . \u201c red cars \u201d \u2286 \u201c cars \u201d ) and homographs ( eg . \u201c tongue \u201d \u2229 \u201c body \u201d should be similar to \u201c mouth \u201d , while \u201c tongue \u201d \u2229 \u201c language \u201d should be similar to \u201c dialect \u201d ) have natural set - theoretic interpretations .", "box embeddings are a novel region - based representation which provide the capability to perform these set - theoretic operations .", "in this work , we provide a fuzzy - set interpretation of box embeddings , and learn box representations of words using a set - theoretic training objective .", "we demonstrate improved performance on various word similarity tasks , particularly on less common words , and perform a quantitative and qualitative analysis exploring the additional unique expressivity provided by word2box ."], "events": [{"event_type": "ITT", "arguments": [{"text": "representations of words", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["representations", "of", "words"], "offsets": [1, 2, 3]}], "trigger": {"text": "learning", "tokens": 
["learning"], "offsets": [0]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [130]}, {"text": "fuzzy - set interpretation of box embeddings", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["fuzzy", "-", "set", "interpretation", "of", "box", "embeddings"], "offsets": [133, 134, 135, 136, 137, 138, 139]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [131]}}, {"event_type": "MDS", "arguments": [{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [142]}, {"text": "a set - theoretic", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["a", "set", "-", "theoretic"], "offsets": [148, 149, 150, 151]}, {"text": "objective", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["objective"], "offsets": [153]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [152]}}, {"event_type": "PUR", "arguments": [{"text": "box representations of words", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["box", "representations", "of", "words"], "offsets": [143, 144, 145, 146]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [142]}}, {"event_type": "FAC", "arguments": [{"text": "improved performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["improved", "performance"], "offsets": [157, 158]}, {"text": "on various word similarity tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "various", "word", "similarity", "tasks"], "offsets": [159, 160, 161, 162, 163]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [156]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [155]}, {"text": "quantitative and qualitative analysis", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["quantitative", 
"and", "qualitative", "analysis"], "offsets": [174, 175, 176, 177]}, {"text": "additional unique expressivity provided by word2box", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["additional", "unique", "expressivity", "provided", "by", "word2box"], "offsets": [180, 181, 182, 183, 184, 185]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [172]}}], "document": ["learning", "representations", "of", "words", "in", "a", "continuous", "space", "is", "perhaps", "the", "most", "fundamental", "task", "in", "nlp", ",", "however", "words", "interact", "in", "ways", "much", "richer", "than", "vector", "dot", "product", "similarity", "can", "provide", ".", "many", "relationships", "between", "words", "can", "be", "expressed", "set", "-", "theoretically", ",", "for", "example", ",", "adjective", "-", "noun", "compounds", "(", "eg", ".", "\u201c", "red", "cars", "\u201d", "\u2286", "\u201c", "cars", "\u201d", ")", "and", "homographs", "(", "eg", ".", "\u201c", "tongue", "\u201d", "\u2229", "\u201c", "body", "\u201d", "should", "be", "similar", "to", "\u201c", "mouth", "\u201d", ",", "while", "\u201c", "tongue", "\u201d", "\u2229", "\u201c", "language", "\u201d", "should", "be", "similar", "to", "\u201c", "dialect", "\u201d", ")", "have", "natural", "set", "-", "theoretic", "interpretations", ".", "box", "embeddings", "are", "a", "novel", "region", "-", "based", "representation", "which", "provide", "the", "capability", "to", "perform", "these", "set", "-", "theoretic", "operations", ".", "in", "this", "work", ",", "we", "provide", "a", "fuzzy", "-", "set", "interpretation", "of", "box", "embeddings", ",", "and", "learn", "box", "representations", "of", "words", "using", "a", "set", "-", "theoretic", "training", "objective", ".", "we", "demonstrate", "improved", "performance", "on", "various", "word", "similarity", "tasks", ",", "particularly", "on", "less", "common", "words", ",", "and", "perform", "a", "quantitative", "and", "qualitative", 
"analysis", "exploring", "the", "additional", "unique", "expressivity", "provided", "by", "word2box", "."]}, {"venue": "ACL", "title": "Refer360\u2218: A Referring Expression Recognition Dataset in 360\u2218 Images", "abstract": "We propose a novel large-scale referring expression recognition dataset, Refer360\u00b0, consisting of 17,137 instruction sequences and ground-truth actions for completing these instructions in 360\u00b0 scenes. Refer360\u00b0 differs from existing related datasets in three ways. First, we propose a more realistic scenario where instructors and the followers have partial, yet dynamic, views of the scene \u2013 followers continuously modify their field-of-view (FoV) while interpreting instructions that specify a final target location. Second, instructions to find the target location consist of multiple steps for followers who will start at random FoVs. As a result, intermediate instructions are strongly grounded in object references, and followers must identify intermediate FoVs to find the final target location correctly. Third, the target locations are neither restricted to predefined objects nor chosen by annotators; instead, they are distributed randomly across scenes. This \u201cpoint anywhere\u201d approach leads to more linguistically complex instructions, as shown in our analyses. 
Our examination of the dataset shows that Refer360\u00b0 manifests linguistically rich phenomena in a language grounding task that poses novel challenges for computational modeling of language, vision, and navigation.", "doc_id": "f06dbd04f9f90357d88e4c8cc992fdbf", "publication_year": 2020, "sentences": ["we propose a novel large - scale referring expression recognition dataset , refer360\u00b0 , consisting of 17 , 137 instruction sequences and ground - truth actions for completing these instructions in 360\u00b0 scenes .", "refer360\u00b0 differs from existing related datasets in three ways .", "first , we propose a more realistic scenario where instructors and the followers have partial , yet dynamic , views of the scene \u2013 followers continuously modify their field - of - view ( fov ) while interpreting instructions that specify a final target location .", "second , instructions to find the target location consist of multiple steps for followers who will start at random fovs .", "as a result , intermediate instructions are strongly grounded in object references , and followers must identify intermediate fovs to find the final target location correctly .", "third , the target locations are neither restricted to predefined objects nor chosen by annotators ; instead , they are distributed randomly across scenes .", "this \u201c point anywhere \u201d approach leads to more linguistically complex instructions , as shown in our analyses .", "our examination of the dataset shows that refer360\u00b0 manifests linguistically rich phenomena in a language grounding task that poses novel challenges for computational modeling of language , vision , and navigation ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "large - scale referring expression recognition dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "-", "scale", 
"referring", "expression", "recognition", "dataset"], "offsets": [4, 5, 6, 7, 8, 9, 10]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [46]}, {"text": "more realistic scenario", "nugget_type": "APP", "argument_type": "Content", "tokens": ["more", "realistic", "scenario"], "offsets": [49, 50, 51]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [47]}}, {"event_type": "FAC", "arguments": [{"text": "\u201c point anywhere \u201d approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["\u201c", "point", "anywhere", "\u201d", "approach"], "offsets": [164, 165, 166, 167, 168]}, {"text": "more linguistically complex instructions", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["more", "linguistically", "complex", "instructions"], "offsets": [171, 172, 173, 174]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [169]}}, {"event_type": "FIN", "arguments": [{"text": "manifests", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["manifests"], "offsets": [190]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [187]}}, {"event_type": "FAC", "arguments": [{"text": "language grounding task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "grounding", "task"], "offsets": [196, 197, 198]}, {"text": "refer360\u00b0", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["refer360\u00b0"], "offsets": [189]}, {"text": "linguistically rich phenomena", "nugget_type": "STR", "argument_type": "Object", "tokens": ["linguistically", "rich", "phenomena"], "offsets": [191, 192, 193]}], "trigger": {"text": "manifests", "tokens": ["manifests"], "offsets": [190]}}], "document": ["we", "propose", "a", "novel", "large", "-", "scale", "referring", "expression", "recognition", "dataset", ",", "refer360\u00b0", ",", 
"consisting", "of", "17", ",", "137", "instruction", "sequences", "and", "ground", "-", "truth", "actions", "for", "completing", "these", "instructions", "in", "360\u00b0", "scenes", ".", "refer360\u00b0", "differs", "from", "existing", "related", "datasets", "in", "three", "ways", ".", "first", ",", "we", "propose", "a", "more", "realistic", "scenario", "where", "instructors", "and", "the", "followers", "have", "partial", ",", "yet", "dynamic", ",", "views", "of", "the", "scene", "\u2013", "followers", "continuously", "modify", "their", "field", "-", "of", "-", "view", "(", "fov", ")", "while", "interpreting", "instructions", "that", "specify", "a", "final", "target", "location", ".", "second", ",", "instructions", "to", "find", "the", "target", "location", "consist", "of", "multiple", "steps", "for", "followers", "who", "will", "start", "at", "random", "fovs", ".", "as", "a", "result", ",", "intermediate", "instructions", "are", "strongly", "grounded", "in", "object", "references", ",", "and", "followers", "must", "identify", "intermediate", "fovs", "to", "find", "the", "final", "target", "location", "correctly", ".", "third", ",", "the", "target", "locations", "are", "neither", "restricted", "to", "predefined", "objects", "nor", "chosen", "by", "annotators", ";", "instead", ",", "they", "are", "distributed", "randomly", "across", "scenes", ".", "this", "\u201c", "point", "anywhere", "\u201d", "approach", "leads", "to", "more", "linguistically", "complex", "instructions", ",", "as", "shown", "in", "our", "analyses", ".", "our", "examination", "of", "the", "dataset", "shows", "that", "refer360\u00b0", "manifests", "linguistically", "rich", "phenomena", "in", "a", "language", "grounding", "task", "that", "poses", "novel", "challenges", "for", "computational", "modeling", "of", "language", ",", "vision", ",", "and", "navigation", "."]}, {"venue": "ACL", "title": "Supporting Land Reuse of Former Open Pit Mining Sites using Text Classification and Active Learning", 
"abstract": "Open pit mines left many regions worldwide inhospitable or uninhabitable. Many sites are left behind in a hazardous or contaminated state, show remnants of waste, or have other restrictions imposed upon them, e.g., for the protection of human or nature. Such information has to be permanently managed in order to reuse those areas in the future. In this work we present and evaluate an automated workflow for supporting the post-mining management of former lignite open pit mines in the eastern part of Germany, where prior to any planned land reuse, aforementioned information has to be acquired to ensure the safety and validity of such an endeavor. Usually, this information is found in expert reports, either in the form of paper documents, or in the best case as digitized unstructured text\u2014all of them in German language. However, due to the size and complexity of these documents, any inquiry is tedious and time-consuming, thereby slowing down or even obstructing the reuse of related areas. Since no training data is available, we employ active learning in order to perform multi-label sentence classification for two categories of restrictions and seven categories of topics. The final system integrates optical character recognition (OCR), active-learning-based text classification, and geographic information system visualization in order to effectively extract, query, and visualize this information for any area of interest. 
Active learning and text classification results are twofold: Whereas the restriction categories were reasonably accurate (>0.85 F1), the seven topic-oriented categories seemed to be complex even for human annotators and achieved mediocre evaluation scores (<0.70 F1).", "doc_id": "0528e590c79ec24fcf87bfa529d8a152", "publication_year": 2021, "sentences": ["open pit mines left many regions worldwide inhospitable or uninhabitable .", "many sites are left behind in a hazardous or contaminated state , show remnants of waste , or have other restrictions imposed upon them , e . g . , for the protection of human or nature .", "such information has to be permanently managed in order to reuse those areas in the future .", "in this work we present and evaluate an automated workflow for supporting the post - mining management of former lignite open pit mines in the eastern part of germany , where prior to any planned land reuse , aforementioned information has to be acquired to ensure the safety and validity of such an endeavor .", "usually , this information is found in expert reports , either in the form of paper documents , or in the best case as digitized unstructured text \u2014 all of them in german language .", "however , due to the size and complexity of these documents , any inquiry is tedious and time - consuming , thereby slowing down or even obstructing the reuse of related areas .", "since no training data is available , we employ active learning in order to perform multi - label sentence classification for two categories of restrictions and seven categories of topics .", "the final system integrates optical character recognition ( ocr ) , active - learning - based text classification , and geographic information system visualization in order to effectively extract , query , and visualize this information for any area of interest .", "active learning and text classification results are twofold :", "whereas the restriction categories were reasonably accurate ( > 0 . 
85 f1 ) , the seven topic - oriented categories seemed to be complex even for human annotators and achieved mediocre evaluation scores ( < 0 . 70 f1 ) ."], "events": [{"event_type": "MDS", "arguments": [{"text": "two categories of restrictions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["two", "categories", "of", "restrictions"], "offsets": [210, 211, 212, 213]}, {"text": "seven categories of topics", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["seven", "categories", "of", "topics"], "offsets": [215, 216, 217, 218]}, {"text": "active learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["active", "learning"], "offsets": [198, 199]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [203]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [197]}}, {"event_type": "PUR", "arguments": [{"text": "multi - label sentence classification", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["multi", "-", "label", "sentence", "classification"], "offsets": [204, 205, 206, 207, 208]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [203]}}], "document": ["open", "pit", "mines", "left", "many", "regions", "worldwide", "inhospitable", "or", "uninhabitable", ".", "many", "sites", "are", "left", "behind", "in", "a", "hazardous", "or", "contaminated", "state", ",", "show", "remnants", "of", "waste", ",", "or", "have", "other", "restrictions", "imposed", "upon", "them", ",", "e", ".", "g", ".", ",", "for", "the", "protection", "of", "human", "or", "nature", ".", "such", "information", "has", "to", "be", "permanently", "managed", "in", "order", "to", "reuse", "those", "areas", "in", "the", "future", ".", "in", "this", "work", "we", "present", "and", "evaluate", "an", "automated", "workflow", "for", "supporting", "the", "post", "-", "mining", "management", "of", "former", "lignite", "open", "pit", "mines", "in", "the", 
"eastern", "part", "of", "germany", ",", "where", "prior", "to", "any", "planned", "land", "reuse", ",", "aforementioned", "information", "has", "to", "be", "acquired", "to", "ensure", "the", "safety", "and", "validity", "of", "such", "an", "endeavor", ".", "usually", ",", "this", "information", "is", "found", "in", "expert", "reports", ",", "either", "in", "the", "form", "of", "paper", "documents", ",", "or", "in", "the", "best", "case", "as", "digitized", "unstructured", "text", "\u2014", "all", "of", "them", "in", "german", "language", ".", "however", ",", "due", "to", "the", "size", "and", "complexity", "of", "these", "documents", ",", "any", "inquiry", "is", "tedious", "and", "time", "-", "consuming", ",", "thereby", "slowing", "down", "or", "even", "obstructing", "the", "reuse", "of", "related", "areas", ".", "since", "no", "training", "data", "is", "available", ",", "we", "employ", "active", "learning", "in", "order", "to", "perform", "multi", "-", "label", "sentence", "classification", "for", "two", "categories", "of", "restrictions", "and", "seven", "categories", "of", "topics", ".", "the", "final", "system", "integrates", "optical", "character", "recognition", "(", "ocr", ")", ",", "active", "-", "learning", "-", "based", "text", "classification", ",", "and", "geographic", "information", "system", "visualization", "in", "order", "to", "effectively", "extract", ",", "query", ",", "and", "visualize", "this", "information", "for", "any", "area", "of", "interest", ".", "active", "learning", "and", "text", "classification", "results", "are", "twofold", ":", "whereas", "the", "restriction", "categories", "were", "reasonably", "accurate", "(", ">", "0", ".", "85", "f1", ")", ",", "the", "seven", "topic", "-", "oriented", "categories", "seemed", "to", "be", "complex", "even", "for", "human", "annotators", "and", "achieved", "mediocre", "evaluation", "scores", "(", "<", "0", ".", "70", "f1", ")", "."]}, {"venue": "ACL", "title": "Discriminative Marginalized 
Probabilistic Neural Method for Multi-Document Summarization of Medical Literature", "abstract": "Although current state-of-the-art Transformer-based solutions succeeded in a wide range for single-document NLP tasks, they still struggle to address multi-input tasks such as multi-document summarization. Many solutions truncate the inputs, thus ignoring potential summary-relevant contents, which is unacceptable in the medical domain where each information can be vital. Others leverage linear model approximations to apply multi-input concatenation, worsening the results because all information is considered, even if it is conflicting or noisy with respect to a shared background. Despite the importance and social impact of medicine, there are no ad-hoc solutions for multi-document summarization. For this reason, we propose a novel discriminative marginalized probabilistic method (DAMEN) trained to discriminate critical information from a cluster of topic-related medical documents and generate a multi-document summary via token probability marginalization. Results prove we outperform the previous state-of-the-art on a biomedical dataset for multi-document summarization of systematic literature reviews. 
Moreover, we perform extensive ablation studies to motivate the design choices and prove the importance of each module of our method.", "doc_id": "61ae00f7645b526717c6ec56d36905a2", "publication_year": 2022, "sentences": ["although current state - of - the - art transformer - based solutions succeeded in a wide range for single - document nlp tasks , they still struggle to address multi - input tasks such as multi - document summarization .", "many solutions truncate the inputs , thus ignoring potential summary - relevant contents , which is unacceptable in the medical domain where each information can be vital .", "others leverage linear model approximations to apply multi - input concatenation , worsening the results because all information is considered , even if it is conflicting or noisy with respect to a shared background .", "despite the importance and social impact of medicine , there are no ad - hoc solutions for multi - document summarization .", "for this reason , we propose a novel discriminative marginalized probabilistic method ( damen ) trained to discriminate critical information from a cluster of topic - related medical documents and generate a multi - document summary via token probability marginalization .", "results prove we outperform the previous state - of - the - art on a biomedical dataset for multi - document summarization of systematic literature reviews .", "moreover , we perform extensive ablation studies to motivate the design choices and prove the importance of each module of our method ."], "events": [{"event_type": "RWF", "arguments": [{"text": "current state - of - the - art transformer - based solutions", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "transformer", "-", "based", "solutions"], "offsets": [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]}, {"text": "struggle", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["struggle"], "offsets": [27]}], 
"trigger": {"text": "struggle", "tokens": ["struggle"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "multi - input tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["multi", "-", "input", "tasks"], "offsets": [30, 31, 32, 33]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [29]}}, {"event_type": "RWF", "arguments": [{"text": "solutions", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["solutions"], "offsets": [42]}, {"text": "inputs", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["inputs"], "offsets": [45]}], "trigger": {"text": "truncate", "tokens": ["truncate"], "offsets": [43]}}, {"event_type": "RWF", "arguments": [{"text": "ignoring", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignoring"], "offsets": [48]}, {"text": "solutions", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["solutions"], "offsets": [42]}], "trigger": {"text": "ignoring", "tokens": ["ignoring"], "offsets": [48]}}, {"event_type": "RWF", "arguments": [{"text": "worsening", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["worsening"], "offsets": [81]}], "trigger": {"text": "worsening", "tokens": ["worsening"], "offsets": [81]}}, {"event_type": "PRP", "arguments": [{"text": "discriminative marginalized probabilistic method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["discriminative", "marginalized", "probabilistic", "method"], "offsets": [134, 135, 136, 137]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [130]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [131]}}, {"event_type": "MDS", "arguments": [{"text": "multi - document summary", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multi", "-", "document", "summary"], "offsets": [158, 159, 160, 161]}, {"text": "token probability marginalization", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": 
["token", "probability", "marginalization"], "offsets": [163, 164, 165]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [156]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [170]}], "trigger": {"text": "prove", "tokens": ["prove"], "offsets": [168]}}, {"event_type": "CMP", "arguments": [{"text": "discriminative marginalized probabilistic method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["discriminative", "marginalized", "probabilistic", "method"], "offsets": [134, 135, 136, 137]}, {"text": "previous state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art"], "offsets": [172, 173, 174, 175, 176, 177, 178, 179]}, {"text": "biomedical dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["biomedical", "dataset"], "offsets": [182, 183]}, {"text": "multi - document summarization", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["multi", "-", "document", "summarization"], "offsets": [185, 186, 187, 188]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [170]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [196]}, {"text": "motivate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["motivate"], "offsets": [202]}, {"text": "prove", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["prove"], "offsets": [207]}, {"text": "ablation studies", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["ablation", "studies"], "offsets": [199, 200]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [197]}}, {"event_type": "PUR", "arguments": [{"text": "design choices", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["design", "choices"], "offsets": [204, 205]}], 
"trigger": {"text": "motivate", "tokens": ["motivate"], "offsets": [202]}}, {"event_type": "PUR", "arguments": [{"text": "importance of each module", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["importance", "of", "each", "module"], "offsets": [209, 210, 211, 212]}], "trigger": {"text": "prove", "tokens": ["prove"], "offsets": [207]}}, {"event_type": "RWF", "arguments": [{"text": "all information", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["all", "information"], "offsets": [85, 86]}], "trigger": {"text": "considered", "tokens": ["considered"], "offsets": [88]}}, {"event_type": "RWS", "arguments": [{"text": "linear model approximations", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["linear", "model", "approximations"], "offsets": [71, 72, 73]}, {"text": "multi - input concatenation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["multi", "-", "input", "concatenation"], "offsets": [76, 77, 78, 79]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [75]}}, {"event_type": "RWF", "arguments": [{"text": "conflicting or noisy", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["conflicting", "or", "noisy"], "offsets": [94, 95, 96]}], "trigger": {"text": "conflicting or noisy", "tokens": ["conflicting", "or", "noisy"], "offsets": [94, 95, 96]}}, {"event_type": "MDS", "arguments": [{"text": "critical information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["critical", "information"], "offsets": [144, 145]}, {"text": "cluster of topic - related medical documents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["cluster", "of", "topic", "-", "related", "medical", "documents"], "offsets": [148, 149, 150, 151, 152, 153, 154]}], "trigger": {"text": "discriminate", "tokens": ["discriminate"], "offsets": [143]}}], "document": ["although", "current", "state", "-", "of", "-", "the", "-", "art", "transformer", "-", "based", "solutions", 
"succeeded", "in", "a", "wide", "range", "for", "single", "-", "document", "nlp", "tasks", ",", "they", "still", "struggle", "to", "address", "multi", "-", "input", "tasks", "such", "as", "multi", "-", "document", "summarization", ".", "many", "solutions", "truncate", "the", "inputs", ",", "thus", "ignoring", "potential", "summary", "-", "relevant", "contents", ",", "which", "is", "unacceptable", "in", "the", "medical", "domain", "where", "each", "information", "can", "be", "vital", ".", "others", "leverage", "linear", "model", "approximations", "to", "apply", "multi", "-", "input", "concatenation", ",", "worsening", "the", "results", "because", "all", "information", "is", "considered", ",", "even", "if", "it", "is", "conflicting", "or", "noisy", "with", "respect", "to", "a", "shared", "background", ".", "despite", "the", "importance", "and", "social", "impact", "of", "medicine", ",", "there", "are", "no", "ad", "-", "hoc", "solutions", "for", "multi", "-", "document", "summarization", ".", "for", "this", "reason", ",", "we", "propose", "a", "novel", "discriminative", "marginalized", "probabilistic", "method", "(", "damen", ")", "trained", "to", "discriminate", "critical", "information", "from", "a", "cluster", "of", "topic", "-", "related", "medical", "documents", "and", "generate", "a", "multi", "-", "document", "summary", "via", "token", "probability", "marginalization", ".", "results", "prove", "we", "outperform", "the", "previous", "state", "-", "of", "-", "the", "-", "art", "on", "a", "biomedical", "dataset", "for", "multi", "-", "document", "summarization", "of", "systematic", "literature", "reviews", ".", "moreover", ",", "we", "perform", "extensive", "ablation", "studies", "to", "motivate", "the", "design", "choices", "and", "prove", "the", "importance", "of", "each", "module", "of", "our", "method", "."]}, {"venue": "ACL", "title": "DEAM: Dialogue Coherence Evaluation using AMR-based Semantic Manipulations", "abstract": "Automatic evaluation metrics are 
essential for the rapid development of open-domain dialogue systems as they facilitate hyper-parameter tuning and comparison between models. Although recently proposed trainable conversation-level metrics have shown encouraging results, the quality of the metrics is strongly dependent on the quality of training data. Prior works mainly resort to heuristic text-level manipulations (e.g. utterances shuffling) to bootstrap incoherent conversations (negative examples) from coherent dialogues (positive examples). Such approaches are insufficient to appropriately reflect the incoherence that occurs in interactions between advanced dialogue models and humans. To tackle this problem, we propose DEAM, a Dialogue coherence Evaluation metric that relies on Abstract Meaning Representation (AMR) to apply semantic-level Manipulations for incoherent (negative) data generation. AMRs naturally facilitate the injection of various types of incoherence sources, such as coreference inconsistency, irrelevancy, contradictions, and decrease engagement, at the semantic level, thus resulting in more natural incoherent samples. Our experiments show that DEAM achieves higher correlations with human judgments compared to baseline methods on several dialog datasets by significant margins. We also show that DEAM can distinguish between coherent and incoherent dialogues generated by baseline manipulations, whereas those baseline models cannot detect incoherent examples generated by DEAM. 
Our results demonstrate the potential of AMR-based semantic manipulations for natural negative example generation.", "doc_id": "68d883fd2cfe9e80ea9193d5b1e0b588", "publication_year": 2022, "sentences": ["automatic evaluation metrics are essential for the rapid development of open - domain dialogue systems as they facilitate hyper - parameter tuning and comparison between models .", "although recently proposed trainable conversation - level metrics have shown encouraging results , the quality of the metrics is strongly dependent on the quality of training data .", "prior works mainly resort to heuristic text - level manipulations ( e . g . utterances shuffling ) to bootstrap incoherent conversations ( negative examples ) from coherent dialogues ( positive examples ) .", "such approaches are insufficient to appropriately reflect the incoherence that occurs in interactions between advanced dialogue models and humans .", "to tackle this problem , we propose deam , a dialogue coherence evaluation metric that relies on abstract meaning representation ( amr ) to apply semantic - level manipulations for incoherent ( negative ) data generation .", "amrs naturally facilitate the injection of various types of incoherence sources , such as coreference inconsistency , irrelevancy , contradictions , and decrease engagement , at the semantic level , thus resulting in more natural incoherent samples .", "our experiments show that deam achieves higher correlations with human judgments compared to baseline methods on several dialog datasets by significant margins .", "we also show that deam can distinguish between coherent and incoherent dialogues generated by baseline manipulations , whereas those baseline models cannot detect incoherent examples generated by deam .", "our results demonstrate the potential of amr - based semantic manipulations for natural negative example generation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatic evaluation metrics", 
"nugget_type": "APP", "argument_type": "Target", "tokens": ["automatic", "evaluation", "metrics"], "offsets": [0, 1, 2]}], "trigger": {"text": "essential", "tokens": ["essential"], "offsets": [4]}}, {"event_type": "RWS", "arguments": [{"text": "coherent dialogues", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coherent", "dialogues"], "offsets": [82, 83]}, {"text": "incoherent conversations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["incoherent", "conversations"], "offsets": [75, 76]}], "trigger": {"text": "bootstrap", "tokens": ["bootstrap"], "offsets": [74]}}, {"event_type": "RWF", "arguments": [{"text": "insufficient to appropriately reflect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["insufficient", "to", "appropriately", "reflect"], "offsets": [92, 93, 94, 95]}, {"text": "incoherence", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["incoherence"], "offsets": [97]}], "trigger": {"text": "insufficient to appropriately reflect", "tokens": ["insufficient", "to", "appropriately", "reflect"], "offsets": [92, 93, 94, 95]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [114]}, {"text": "dialogue coherence evaluation metric", "nugget_type": "APP", "argument_type": "Content", "tokens": ["dialogue", "coherence", "evaluation", "metric"], "offsets": [119, 120, 121, 122]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [115]}}, {"event_type": "WKS", "arguments": [{"text": "abstract meaning representation", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["abstract", "meaning", "representation"], "offsets": [126, 127, 128]}, {"text": "apply", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["apply"], "offsets": [133]}, {"text": "incoherent ( negative ) data generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["incoherent", "data", "generation"], 
"offsets": [139, 143, 144]}], "trigger": {"text": "relies", "tokens": ["relies"], "offsets": [124]}}, {"event_type": "PUR", "arguments": [{"text": "semantic - level manipulations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["semantic", "-", "level", "manipulations"], "offsets": [134, 135, 136, 137]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [133]}}, {"event_type": "WKS", "arguments": [{"text": "injection of various types of incoherence sources", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["injection", "of", "various", "types", "of", "incoherence", "sources"], "offsets": [150, 151, 152, 153, 154, 155, 156]}, {"text": "at the semantic level", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "the", "semantic", "level"], "offsets": [171, 172, 173, 174]}, {"text": "resulting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["resulting"], "offsets": [177]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [148]}}, {"event_type": "PUR", "arguments": [{"text": "more natural incoherent samples", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["more", "natural", "incoherent", "samples"], "offsets": [179, 180, 181, 182]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [177]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [189]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [186]}}, {"event_type": "CMP", "arguments": [{"text": "deam", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["deam"], "offsets": [188]}, {"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [190]}, {"text": "correlations", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["correlations"], "offsets": [191]}, {"text": "with human judgments", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["with", "human", "judgments"], "offsets": [192, 193, 194]}, {"text": "baseline methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "methods"], "offsets": [197, 198]}, {"text": "several dialog datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["several", "dialog", "datasets"], "offsets": [200, 201, 202]}, {"text": "significant margins", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant", "margins"], "offsets": [204, 205]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [189]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [207]}, {"text": "distinguish", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["distinguish"], "offsets": [213]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [209]}}, {"event_type": "FAC", "arguments": [{"text": "deam", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["deam"], "offsets": [211]}, {"text": "generated by baseline manipulations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generated", "by", "baseline", "manipulations"], "offsets": [219, 220, 221, 222]}, {"text": "coherent dialogues", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["coherent", "dialogues"], "offsets": [215, 218]}, {"text": "incoherent dialogues", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["incoherent", "dialogues"], "offsets": [217, 218]}], "trigger": {"text": "distinguish", "tokens": ["distinguish"], "offsets": [213]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [207]}, {"text": "cannot detect", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["cannot", "detect"], "offsets": [228, 229]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [209]}}, {"event_type": "FAC", 
"arguments": [{"text": "baseline models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["baseline", "models"], "offsets": [226, 227]}, {"text": "incoherent examples", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["incoherent", "examples"], "offsets": [230, 231]}, {"text": "generated by deam", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generated", "by", "deam"], "offsets": [232, 233, 234]}], "trigger": {"text": "cannot detect", "tokens": ["cannot", "detect"], "offsets": [228, 229]}}, {"event_type": "FAC", "arguments": [{"text": "potential of amr - based semantic manipulations", "nugget_type": "STR", "argument_type": "Object", "tokens": ["potential", "of", "amr", "-", "based", "semantic", "manipulations"], "offsets": [240, 241, 242, 243, 244, 245, 246]}, {"text": "natural negative example generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "negative", "example", "generation"], "offsets": [248, 249, 250, 251]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [238]}}], "document": ["automatic", "evaluation", "metrics", "are", "essential", "for", "the", "rapid", "development", "of", "open", "-", "domain", "dialogue", "systems", "as", "they", "facilitate", "hyper", "-", "parameter", "tuning", "and", "comparison", "between", "models", ".", "although", "recently", "proposed", "trainable", "conversation", "-", "level", "metrics", "have", "shown", "encouraging", "results", ",", "the", "quality", "of", "the", "metrics", "is", "strongly", "dependent", "on", "the", "quality", "of", "training", "data", ".", "prior", "works", "mainly", "resort", "to", "heuristic", "text", "-", "level", "manipulations", "(", "e", ".", "g", ".", "utterances", "shuffling", ")", "to", "bootstrap", "incoherent", "conversations", "(", "negative", "examples", ")", "from", "coherent", "dialogues", "(", "positive", "examples", ")", ".", "such", "approaches", "are", "insufficient", "to", 
"appropriately", "reflect", "the", "incoherence", "that", "occurs", "in", "interactions", "between", "advanced", "dialogue", "models", "and", "humans", ".", "to", "tackle", "this", "problem", ",", "we", "propose", "deam", ",", "a", "dialogue", "coherence", "evaluation", "metric", "that", "relies", "on", "abstract", "meaning", "representation", "(", "amr", ")", "to", "apply", "semantic", "-", "level", "manipulations", "for", "incoherent", "(", "negative", ")", "data", "generation", ".", "amrs", "naturally", "facilitate", "the", "injection", "of", "various", "types", "of", "incoherence", "sources", ",", "such", "as", "coreference", "inconsistency", ",", "irrelevancy", ",", "contradictions", ",", "and", "decrease", "engagement", ",", "at", "the", "semantic", "level", ",", "thus", "resulting", "in", "more", "natural", "incoherent", "samples", ".", "our", "experiments", "show", "that", "deam", "achieves", "higher", "correlations", "with", "human", "judgments", "compared", "to", "baseline", "methods", "on", "several", "dialog", "datasets", "by", "significant", "margins", ".", "we", "also", "show", "that", "deam", "can", "distinguish", "between", "coherent", "and", "incoherent", "dialogues", "generated", "by", "baseline", "manipulations", ",", "whereas", "those", "baseline", "models", "cannot", "detect", "incoherent", "examples", "generated", "by", "deam", ".", "our", "results", "demonstrate", "the", "potential", "of", "amr", "-", "based", "semantic", "manipulations", "for", "natural", "negative", "example", "generation", "."]}, {"venue": "ACL", "title": "Mitigating Bias in Session-based Cyberbullying Detection: A Non-Compromising Approach", "abstract": "The element of repetition in cyberbullying behavior has directed recent computational studies toward detecting cyberbullying based on a social media session. In contrast to a single text, a session may consist of an initial post and an associated sequence of comments. 
Yet, emerging efforts to enhance the performance of session-based cyberbullying detection have largely overlooked unintended social biases in existing cyberbullying datasets. For example, a session containing certain demographic-identity terms (e.g., \u201cgay\u201d or \u201cblack\u201d) is more likely to be classified as an instance of cyberbullying. In this paper, we first show evidence of such bias in models trained on sessions collected from different social media platforms (e.g., Instagram). We then propose a context-aware and model-agnostic debiasing strategy that leverages a reinforcement learning technique, without requiring any extra resources or annotations apart from a pre-defined set of sensitive triggers commonly used for identifying cyberbullying instances. Empirical evaluations show that the proposed strategy can simultaneously alleviate the impacts of the unintended biases and improve the detection performance.", "doc_id": "8bf67e8eaf8c9e2505473c79376abbd5", "publication_year": 2021, "sentences": ["the element of repetition in cyberbullying behavior has directed recent computational studies toward detecting cyberbullying based on a social media session .", "in contrast to a single text , a session may consist of an initial post and an associated sequence of comments .", "yet , emerging efforts to enhance the performance of session - based cyberbullying detection have largely overlooked unintended social biases in existing cyberbullying datasets .", "for example , a session containing certain demographic - identity terms ( e . g . , \u201c gay \u201d or \u201c black \u201d ) is more likely to be classified as an instance of cyberbullying .", "in this paper , we first show evidence of such bias in models trained on sessions collected from different social media platforms ( e . g . 
, instagram ) .", "we then propose a context - aware and model - agnostic debiasing strategy that leverages a reinforcement learning technique , without requiring any extra resources or annotations apart from a pre - defined set of sensitive triggers commonly used for identifying cyberbullying instances .", "empirical evaluations show that the proposed strategy can simultaneously alleviate the impacts of the unintended biases and improve the detection performance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "computational studies", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["computational", "studies"], "offsets": [10, 11]}], "trigger": {"text": "directed", "tokens": ["directed"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "performance of session - based cyberbullying detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance", "of", "session", "-", "based", "cyberbullying", "detection"], "offsets": [51, 52, 53, 54, 55, 56, 57]}, {"text": "unintended social biases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unintended", "social", "biases"], "offsets": [61, 62, 63]}, {"text": "cyberbullying datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["cyberbullying", "datasets"], "offsets": [66, 67]}, {"text": "largely", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["largely"], "offsets": [59]}], "trigger": {"text": "overlooked", "tokens": ["overlooked"], "offsets": [60]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [110]}, {"text": "in models trained on sessions collected from different social media platforms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "models", "trained", "on", "sessions", "collected", "from", "different", "social", "media", "platforms"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127]}, {"text": 
"evidence of such bias", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["evidence", "of", "unintended", "social", "biases"], "offsets": [113, 114, 61, 62, 63]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [112]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [137]}, {"text": "context - aware and model - agnostic debiasing strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["context", "-", "aware", "and", "model", "-", "agnostic", "debiasing", "strategy"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [139]}}, {"event_type": "MDS", "arguments": [{"text": "pre - defined set of sensitive triggers", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["pre", "-", "defined", "set", "of", "sensitive", "triggers"], "offsets": [167, 168, 169, 170, 171, 172, 173]}, {"text": "identifying", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identifying"], "offsets": [177]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [175]}}, {"event_type": "PUR", "arguments": [{"text": "cyberbullying instances", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["cyberbullying", "instances"], "offsets": [178, 179]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [177]}}, {"event_type": "FAC", "arguments": [{"text": "context - aware and model - agnostic debiasing strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["context", "-", "aware", "and", "model", "-", "agnostic", "debiasing", "strategy"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149]}, {"text": "impacts of the unintended biases", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["impacts", "of", "the", "unintended", "biases"], "offsets": [192, 193, 194, 195, 196]}], "trigger": {"text": "alleviate", "tokens": 
["alleviate"], "offsets": [190]}}, {"event_type": "CMP", "arguments": [{"text": "context - aware and model - agnostic debiasing strategy", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["context", "-", "aware", "and", "model", "-", "agnostic", "debiasing", "strategy"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148, 149]}, {"text": "improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improve"], "offsets": [198]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [198]}}, {"event_type": "FIN", "arguments": [{"text": "alleviate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["alleviate"], "offsets": [190]}, {"text": "improve", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improve"], "offsets": [198]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [183]}}], "document": ["the", "element", "of", "repetition", "in", "cyberbullying", "behavior", "has", "directed", "recent", "computational", "studies", "toward", "detecting", "cyberbullying", "based", "on", "a", "social", "media", "session", ".", "in", "contrast", "to", "a", "single", "text", ",", "a", "session", "may", "consist", "of", "an", "initial", "post", "and", "an", "associated", "sequence", "of", "comments", ".", "yet", ",", "emerging", "efforts", "to", "enhance", "the", "performance", "of", "session", "-", "based", "cyberbullying", "detection", "have", "largely", "overlooked", "unintended", "social", "biases", "in", "existing", "cyberbullying", "datasets", ".", "for", "example", ",", "a", "session", "containing", "certain", "demographic", "-", "identity", "terms", "(", "e", ".", "g", ".", ",", "\u201c", "gay", "\u201d", "or", "\u201c", "black", "\u201d", ")", "is", "more", "likely", "to", "be", "classified", "as", "an", "instance", "of", "cyberbullying", ".", "in", "this", "paper", ",", "we", "first", "show", "evidence", "of", "such", "bias", "in", "models", "trained", "on", "sessions", "collected", "from", 
"different", "social", "media", "platforms", "(", "e", ".", "g", ".", ",", "instagram", ")", ".", "we", "then", "propose", "a", "context", "-", "aware", "and", "model", "-", "agnostic", "debiasing", "strategy", "that", "leverages", "a", "reinforcement", "learning", "technique", ",", "without", "requiring", "any", "extra", "resources", "or", "annotations", "apart", "from", "a", "pre", "-", "defined", "set", "of", "sensitive", "triggers", "commonly", "used", "for", "identifying", "cyberbullying", "instances", ".", "empirical", "evaluations", "show", "that", "the", "proposed", "strategy", "can", "simultaneously", "alleviate", "the", "impacts", "of", "the", "unintended", "biases", "and", "improve", "the", "detection", "performance", "."]}, {"venue": "ACL", "title": "Multi-perspective Coherent Reasoning for Helpfulness Prediction of Multimodal Reviews", "abstract": "As more and more product reviews are posted in both text and images, Multimodal Review Analysis (MRA) becomes an attractive research topic. Among the existing review analysis tasks, helpfulness prediction on review text has become predominant due to its importance for e-commerce platforms and online shops, i.e. helping customers quickly acquire useful product information. This paper proposes a new task Multimodal Review Helpfulness Prediction (MRHP) aiming to analyze the review helpfulness from text and visual modalities. Meanwhile, a novel Multi-perspective Coherent Reasoning method (MCR) is proposed to solve the MRHP task, which conducts joint reasoning over texts and images from both the product and the review, and aggregates the signals to predict the review helpfulness. Concretely, we first propose a product-review coherent reasoning module to measure the intra- and inter-modal coherence between the target product and the review. 
In addition, we also devise an intra-review coherent reasoning module to identify the coherence between the text content and images of the review, which is a piece of strong evidence for review helpfulness prediction. To evaluate the effectiveness of MCR, we present two newly collected multimodal review datasets as benchmark evaluation resources for the MRHP task. Experimental results show that our MCR method can lead to a performance increase of up to 8.5% as compared to the best performing text-only model. The source code and datasets can be obtained from https://github.com/jhliu17/MCR.", "doc_id": "17da0fbe94c4bb9d6fb54aa432c0fd79", "publication_year": 2021, "sentences": ["as more and more product reviews are posted in both text and images , multimodal review analysis ( mra ) becomes an attractive research topic .", "among the existing review analysis tasks , helpfulness prediction on review text has become predominant due to its importance for e - commerce platforms and online shops , i . e . 
helping customers quickly acquire useful product information .", "this paper proposes a new task multimodal review helpfulness prediction ( mrhp ) aiming to analyze the review helpfulness from text and visual modalities .", "meanwhile , a novel multi - perspective coherent reasoning method ( mcr ) is proposed to solve the mrhp task , which conducts joint reasoning over texts and images from both the product and the review , and aggregates the signals to predict the review helpfulness .", "concretely , we first propose a product - review coherent reasoning module to measure the intra - and inter - modal coherence between the target product and the review .", "in addition , we also devise an intra - review coherent reasoning module to identify the coherence between the text content and images of the review , which is a piece of strong evidence for review helpfulness prediction .", "to evaluate the effectiveness of mcr , we present two newly collected multimodal review datasets as benchmark evaluation resources for the mrhp task .", "experimental results show that our mcr method can lead to a performance increase of up to 8 . 5 % as compared to the best performing text - only model .", "the source code and datasets can be obtained from https : / / github . 
com / jhliu17 / mcr ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multimodal review analysis", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multimodal", "review", "analysis"], "offsets": [14, 15, 16]}], "trigger": {"text": "becomes", "tokens": ["becomes"], "offsets": [20]}}, {"event_type": "PRP", "arguments": [{"text": "analyze", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analyze"], "offsets": [81]}, {"text": "multimodal review helpfulness prediction", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["multimodal", "review", "helpfulness", "prediction"], "offsets": [72, 73, 74, 75]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "review helpfulness", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["review", "helpfulness"], "offsets": [83, 84]}, {"text": "from text and visual modalities", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "text", "and", "visual", "modalities"], "offsets": [85, 86, 87, 88, 89]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [81]}}, {"event_type": "PRP", "arguments": [{"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [107]}, {"text": "multi - perspective coherent reasoning method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "perspective", "coherent", "reasoning", "method"], "offsets": [95, 96, 97, 98, 99, 100]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [105]}}, {"event_type": "PUR", "arguments": [{"text": "multimodal review helpfulness prediction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["multimodal", "review", "helpfulness", "prediction"], "offsets": [72, 73, 74, 75]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [107]}}, {"event_type": "PRP", "arguments": [{"text": "product - review coherent reasoning 
module", "nugget_type": "APP", "argument_type": "Content", "tokens": ["product", "-", "review", "coherent", "reasoning", "module"], "offsets": [144, 145, 146, 147, 148, 149]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [140]}, {"text": "measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measure"], "offsets": [151]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "intra - and inter - modal", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["intra", "-", "and", "inter", "-", "modal"], "offsets": [153, 154, 155, 156, 157, 158]}, {"text": "modal coherence", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["modal", "coherence"], "offsets": [158, 159]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [151]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [171]}, {"text": "identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identify"], "offsets": [182]}, {"text": "intra - review coherent reasoning module", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["intra", "-", "review", "coherent", "reasoning", "module"], "offsets": [175, 176, 177, 178, 179, 180]}], "trigger": {"text": "devise", "tokens": ["devise"], "offsets": [173]}}, {"event_type": "PUR", "arguments": [], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [182]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [214]}, {"text": "two newly collected multimodal review datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "newly", "collected", "multimodal", "review", "datasets"], "offsets": [216, 217, 218, 219, 220, 221]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", 
"tokens": ["evaluate"], "offsets": [208]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [215]}}, {"event_type": "PUR", "arguments": [{"text": "effectiveness of mcr", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["effectiveness", "of", "mcr"], "offsets": [210, 211, 212]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [208]}}, {"event_type": "FIN", "arguments": [{"text": "compared", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["compared"], "offsets": [252]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [233]}}, {"event_type": "CMP", "arguments": [{"text": "multi - perspective coherent reasoning method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multi", "-", "perspective", "coherent", "reasoning", "method"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "best performing text - only model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["best", "performing", "text", "-", "only", "model"], "offsets": [255, 256, 257, 258, 259, 260]}, {"text": "increase", "nugget_type": "STR", "argument_type": "Result", "tokens": ["increase"], "offsets": [243]}, {"text": "8 . 
5 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["8", ".", "5", "%"], "offsets": [247, 248, 249, 250]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [242]}], "trigger": {"text": "compared", "tokens": ["compared"], "offsets": [252]}}, {"event_type": "WKS", "arguments": [{"text": "joint reasoning", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["joint", "reasoning"], "offsets": [114, 115]}, {"text": "over texts and images from both the product and the review", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "texts", "and", "images", "from", "both", "the", "product", "and", "the", "review"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "conducts", "tokens": ["conducts"], "offsets": [113]}}, {"event_type": "WKS", "arguments": [{"text": "signals", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["signals"], "offsets": [131]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [133]}], "trigger": {"text": "aggregates", "tokens": ["aggregates"], "offsets": [129]}}, {"event_type": "PUR", "arguments": [{"text": "review helpfulness", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["review", "helpfulness"], "offsets": [135, 136]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [133]}}], "document": ["as", "more", "and", "more", "product", "reviews", "are", "posted", "in", "both", "text", "and", "images", ",", "multimodal", "review", "analysis", "(", "mra", ")", "becomes", "an", "attractive", "research", "topic", ".", "among", "the", "existing", "review", "analysis", "tasks", ",", "helpfulness", "prediction", "on", "review", "text", "has", "become", "predominant", "due", "to", "its", "importance", "for", "e", "-", "commerce", "platforms", "and", "online", "shops", ",", "i", ".", "e", ".", "helping", "customers", 
"quickly", "acquire", "useful", "product", "information", ".", "this", "paper", "proposes", "a", "new", "task", "multimodal", "review", "helpfulness", "prediction", "(", "mrhp", ")", "aiming", "to", "analyze", "the", "review", "helpfulness", "from", "text", "and", "visual", "modalities", ".", "meanwhile", ",", "a", "novel", "multi", "-", "perspective", "coherent", "reasoning", "method", "(", "mcr", ")", "is", "proposed", "to", "solve", "the", "mrhp", "task", ",", "which", "conducts", "joint", "reasoning", "over", "texts", "and", "images", "from", "both", "the", "product", "and", "the", "review", ",", "and", "aggregates", "the", "signals", "to", "predict", "the", "review", "helpfulness", ".", "concretely", ",", "we", "first", "propose", "a", "product", "-", "review", "coherent", "reasoning", "module", "to", "measure", "the", "intra", "-", "and", "inter", "-", "modal", "coherence", "between", "the", "target", "product", "and", "the", "review", ".", "in", "addition", ",", "we", "also", "devise", "an", "intra", "-", "review", "coherent", "reasoning", "module", "to", "identify", "the", "coherence", "between", "the", "text", "content", "and", "images", "of", "the", "review", ",", "which", "is", "a", "piece", "of", "strong", "evidence", "for", "review", "helpfulness", "prediction", ".", "to", "evaluate", "the", "effectiveness", "of", "mcr", ",", "we", "present", "two", "newly", "collected", "multimodal", "review", "datasets", "as", "benchmark", "evaluation", "resources", "for", "the", "mrhp", "task", ".", "experimental", "results", "show", "that", "our", "mcr", "method", "can", "lead", "to", "a", "performance", "increase", "of", "up", "to", "8", ".", "5", "%", "as", "compared", "to", "the", "best", "performing", "text", "-", "only", "model", ".", "the", "source", "code", "and", "datasets", "can", "be", "obtained", "from", "https", ":", "/", "/", "github", ".", "com", "/", "jhliu17", "/", "mcr", "."]}, {"venue": "ACL", "title": "Should a Chatbot be Sarcastic? 
Understanding User Preferences Towards Sarcasm Generation", "abstract": "Previous sarcasm generation research has focused on how to generate text that people perceive as sarcastic to create more human-like interactions. In this paper, we argue that we should first turn our attention to the question of when sarcasm should be generated, finding that humans consider sarcastic responses inappropriate to many input utterances. Next, we use a theory-driven framework for generating sarcastic responses, which allows us to control the linguistic devices included during generation. For each device, we investigate how much humans associate it with sarcasm, finding that pragmatic insincerity and emotional markers are devices crucial for making sarcasm recognisable.", "doc_id": "4cdc0b28aa540aa6d1db47b734bb8f1b", "publication_year": 2022, "sentences": ["previous sarcasm generation research has focused on how to generate text that people perceive as sarcastic to create more human - like interactions .", "in this paper , we argue that we should first turn our attention to the question of when sarcasm should be generated , finding that humans consider sarcastic responses inappropriate to many input utterances .", "next , we use a theory - driven framework for generating sarcastic responses , which allows us to control the linguistic devices included during generation .", "for each device , we investigate how much humans associate it with sarcasm , finding that pragmatic insincerity and emotional markers are devices crucial for making sarcasm recognisable ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sarcasm generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sarcasm", "generation"], "offsets": [1, 2]}], "trigger": {"text": "focused", "tokens": ["focused"], "offsets": [5]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [61]}, {"text": "theory - driven 
framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["theory", "-", "driven", "framework"], "offsets": [64, 65, 66, 67]}, {"text": "generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [69]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "sarcastic responses", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["sarcastic", "responses"], "offsets": [70, 71]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [69]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "how much humans associate it with sarcasm", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["how", "much", "humans", "associate", "it", "with", "sarcasm"], "offsets": [91, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [89]}, {"text": "devices crucial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["devices", "crucial"], "offsets": [107, 108]}], "trigger": {"text": "finding", "tokens": ["finding"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "pragmatic insincerity", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["pragmatic", "insincerity"], "offsets": [101, 102]}, {"text": "emotional markers", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["emotional", "markers"], "offsets": [104, 105]}, {"text": "making", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["making"], "offsets": [110]}], "trigger": {"text": "devices crucial", "tokens": ["devices", "crucial"], "offsets": [107, 108]}}, {"event_type": "PUR", "arguments": [{"text": "sarcasm recognisable", "nugget_type": "TAK", 
"argument_type": "Aim", "tokens": ["sarcasm", "recognisable"], "offsets": [111, 112]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [110]}}], "document": ["previous", "sarcasm", "generation", "research", "has", "focused", "on", "how", "to", "generate", "text", "that", "people", "perceive", "as", "sarcastic", "to", "create", "more", "human", "-", "like", "interactions", ".", "in", "this", "paper", ",", "we", "argue", "that", "we", "should", "first", "turn", "our", "attention", "to", "the", "question", "of", "when", "sarcasm", "should", "be", "generated", ",", "finding", "that", "humans", "consider", "sarcastic", "responses", "inappropriate", "to", "many", "input", "utterances", ".", "next", ",", "we", "use", "a", "theory", "-", "driven", "framework", "for", "generating", "sarcastic", "responses", ",", "which", "allows", "us", "to", "control", "the", "linguistic", "devices", "included", "during", "generation", ".", "for", "each", "device", ",", "we", "investigate", "how", "much", "humans", "associate", "it", "with", "sarcasm", ",", "finding", "that", "pragmatic", "insincerity", "and", "emotional", "markers", "are", "devices", "crucial", "for", "making", "sarcasm", "recognisable", "."]}, {"venue": "ACL", "title": "Unsupervised Neural Text Simplification", "abstract": "The paper presents a first attempt towards unsupervised neural text simplification that relies only on unlabeled text corpora. The core framework is composed of a shared encoder and a pair of attentional-decoders, crucially assisted by discrimination-based losses and denoising. The framework is trained using unlabeled text collected from en-Wikipedia dump. Our analysis (both quantitative and qualitative involving human evaluators) on public test data shows that the proposed model can perform text-simplification at both lexical and syntactic levels, competitive to existing supervised methods. It also outperforms viable unsupervised baselines. 
Adding a few labeled pairs helps improve the performance further.", "doc_id": "8db79efe8afe5d404f50c02386022235", "publication_year": 2019, "sentences": ["the paper presents a first attempt towards unsupervised neural text simplification that relies only on unlabeled text corpora .", "the core framework is composed of a shared encoder and a pair of attentional - decoders , crucially assisted by discrimination - based losses and denoising .", "the framework is trained using unlabeled text collected from en - wikipedia dump .", "our analysis ( both quantitative and qualitative involving human evaluators ) on public test data shows that the proposed model can perform text - simplification at both lexical and syntactic levels , competitive to existing supervised methods .", "it also outperforms viable unsupervised baselines .", "adding a few labeled pairs helps improve the performance further ."], "events": [{"event_type": "WKS", "arguments": [{"text": "unsupervised neural text simplification", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unsupervised", "neural", "text", "simplification"], "offsets": [7, 8, 9, 10]}, {"text": "only on unlabeled text corpora", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["only", "on", "unlabeled", "text", "corpora"], "offsets": [13, 14, 15, 16, 17]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "discrimination - based losses", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["discrimination", "-", "based", "losses"], "offsets": [39, 40, 41, 42]}, {"text": "discrimination - based denoising", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["discrimination", "-", "based", "denoising"], "offsets": [39, 40, 41, 44]}], "trigger": {"text": "assisted", "tokens": ["assisted"], "offsets": [37]}}, {"event_type": "MDS", "arguments": [{"text": "unlabeled text", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["unlabeled", "text"], "offsets": [51, 52]}, {"text": "en - wikipedia dump", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["en", "-", "wikipedia", "dump"], "offsets": [55, 56, 57, 58]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [49]}}, {"event_type": "FAC", "arguments": [{"text": "text - simplification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "-", "simplification"], "offsets": [82, 83, 84]}, {"text": "at both lexical and syntactic levels", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "both", "lexical", "and", "syntactic", "levels"], "offsets": [85, 86, 87, 88, 89, 90]}, {"text": "public test data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["public", "test", "data"], "offsets": [72, 73, 74]}, {"text": "unsupervised neural text simplification", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["unsupervised", "neural", "text", "simplification"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [81]}}, {"event_type": "CMP", "arguments": [{"text": "proposed model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "model"], "offsets": [78, 79]}, {"text": "competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive"], "offsets": [92]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [92]}}, {"event_type": "CMP", "arguments": [{"text": "viable unsupervised baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["viable", "unsupervised", "baselines"], "offsets": [101, 102, 103]}, {"text": "unsupervised neural text simplification", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "neural", "text", "simplification"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": 
"adding a few labeled pairs", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["adding", "a", "few", "labeled", "pairs"], "offsets": [105, 106, 107, 108, 109]}, {"text": "improve the performance further", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["improve", "the", "performance", "further"], "offsets": [111, 112, 113, 114]}], "trigger": {"text": "helps", "tokens": ["helps"], "offsets": [110]}}, {"event_type": "FIN", "arguments": [{"text": "perform", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["perform"], "offsets": [81]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [75]}}], "document": ["the", "paper", "presents", "a", "first", "attempt", "towards", "unsupervised", "neural", "text", "simplification", "that", "relies", "only", "on", "unlabeled", "text", "corpora", ".", "the", "core", "framework", "is", "composed", "of", "a", "shared", "encoder", "and", "a", "pair", "of", "attentional", "-", "decoders", ",", "crucially", "assisted", "by", "discrimination", "-", "based", "losses", "and", "denoising", ".", "the", "framework", "is", "trained", "using", "unlabeled", "text", "collected", "from", "en", "-", "wikipedia", "dump", ".", "our", "analysis", "(", "both", "quantitative", "and", "qualitative", "involving", "human", "evaluators", ")", "on", "public", "test", "data", "shows", "that", "the", "proposed", "model", "can", "perform", "text", "-", "simplification", "at", "both", "lexical", "and", "syntactic", "levels", ",", "competitive", "to", "existing", "supervised", "methods", ".", "it", "also", "outperforms", "viable", "unsupervised", "baselines", ".", "adding", "a", "few", "labeled", "pairs", "helps", "improve", "the", "performance", "further", "."]}, {"venue": "ACL", "title": "Controllable Open-ended Question Generation with A New Question Type Ontology", "abstract": "We investigate the less-explored task of generating open-ended questions that are typically answered by multiple sentences. 
We first define a new question type ontology which differentiates the nuanced nature of questions better than widely used question words. A new dataset with 4,959 questions is labeled based on the new ontology. We then propose a novel question type-aware question generation framework, augmented by a semantic graph representation, to jointly predict question focuses and produce the question. Based on this framework, we further use both exemplars and automatically generated templates to improve controllability and diversity. Experiments on two newly collected large-scale datasets show that our model improves question quality over competitive comparisons based on automatic metrics. Human judges also rate our model outputs highly in answerability, coverage of scope, and overall quality. Finally, our model variants with templates can produce questions with enhanced controllability and diversity.", "doc_id": "b84cdc20a1c81be7d99bd6eeb4368bee", "publication_year": 2021, "sentences": ["we investigate the less - explored task of generating open - ended questions that are typically answered by multiple sentences .", "we first define a new question type ontology which differentiates the nuanced nature of questions better than widely used question words .", "a new dataset with 4 , 959 questions is labeled based on the new ontology .", "we then propose a novel question type - aware question generation framework , augmented by a semantic graph representation , to jointly predict question focuses and produce the question .", "based on this framework , we further use both exemplars and automatically generated templates to improve controllability and diversity .", "experiments on two newly collected large - scale datasets show that our model improves question quality over competitive comparisons based on automatic metrics .", "human judges also rate our model outputs highly in answerability , coverage of scope , and overall quality .", "finally , our model variants with templates 
can produce questions with enhanced controllability and diversity ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "generating open - ended questions", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["generating", "open", "-", "ended", "questions"], "offsets": [8, 9, 10, 11, 12]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "new question type ontology", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["new", "question", "type", "ontology"], "offsets": [25, 26, 27, 28]}, {"text": "differentiates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["differentiates"], "offsets": [30]}], "trigger": {"text": "define", "tokens": ["define"], "offsets": [23]}}, {"event_type": "PUR", "arguments": [{"text": "nuanced nature of questions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nuanced", "nature", "of", "questions"], "offsets": [32, 33, 34, 35]}], "trigger": {"text": "differentiates", "tokens": ["differentiates"], "offsets": [30]}}, {"event_type": "WKS", "arguments": [{"text": "new dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["new", "dataset"], "offsets": [44, 45]}, {"text": "based on the new ontology", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "the", "new", "ontology"], "offsets": [53, 54, 55, 56, 57]}], "trigger": {"text": "labeled", "tokens": ["labeled"], "offsets": [52]}}, {"event_type": "PRP", "arguments": [{"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [81]}, {"text": "produce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["produce"], "offsets": [85]}, {"text": "question type - aware 
question generation framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["question", "type", "-", "aware", "question", "generation", "framework"], "offsets": [64, 65, 66, 67, 68, 69, 70]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [59]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [61]}}, {"event_type": "PUR", "arguments": [{"text": "question focuses", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["question", "focuses"], "offsets": [82, 83]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [81]}}, {"event_type": "PUR", "arguments": [{"text": "question", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["question"], "offsets": [87]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [104]}, {"text": "exemplars", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["exemplars"], "offsets": [98]}, {"text": "automatically generated templates", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["automatically", "generated", "templates"], "offsets": [100, 101, 102]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "controllability", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["controllability"], "offsets": [105]}, {"text": "diversity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["diversity"], "offsets": [107]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [104]}}, {"event_type": "CMP", "arguments": [{"text": "two newly collected large - scale datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "newly", "collected", "large", "-", "scale", "datasets"], "offsets": [111, 112, 113, 114, 115, 116, 117]}, {"text": "question 
quality", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["question", "quality"], "offsets": [123, 124]}, {"text": "competitive comparisons based on automatic metrics", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["competitive", "comparisons", "based", "on", "automatic", "metrics"], "offsets": [126, 127, 128, 129, 130, 131]}, {"text": "novel question type - aware question generation framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["novel", "question", "type", "-", "aware", "question", "generation", "framework"], "offsets": [63, 64, 65, 66, 67, 68, 69, 70]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [122]}}, {"event_type": "FAC", "arguments": [{"text": "answerability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["answerability"], "offsets": [142]}, {"text": "coverage of scope", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["coverage", "of", "scope"], "offsets": [144, 145, 146]}, {"text": "overall quality", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["overall", "quality"], "offsets": [149, 150]}, {"text": "question type - aware question generation framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["question", "type", "-", "aware", "question", "generation", "framework"], "offsets": [64, 65, 66, 67, 68, 69, 70]}], "trigger": {"text": "outputs", "tokens": ["outputs"], "offsets": [139]}}, {"event_type": "FAC", "arguments": [{"text": "model variants", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["model", "variants"], "offsets": [155, 156]}, {"text": "questions with enhanced controllability and diversity", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["questions", "with", "enhanced", "controllability", "and", "diversity"], "offsets": [161, 162, 163, 164, 165, 166]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [160]}}], "document": ["we", "investigate", "the", "less", "-", "explored", 
"task", "of", "generating", "open", "-", "ended", "questions", "that", "are", "typically", "answered", "by", "multiple", "sentences", ".", "we", "first", "define", "a", "new", "question", "type", "ontology", "which", "differentiates", "the", "nuanced", "nature", "of", "questions", "better", "than", "widely", "used", "question", "words", ".", "a", "new", "dataset", "with", "4", ",", "959", "questions", "is", "labeled", "based", "on", "the", "new", "ontology", ".", "we", "then", "propose", "a", "novel", "question", "type", "-", "aware", "question", "generation", "framework", ",", "augmented", "by", "a", "semantic", "graph", "representation", ",", "to", "jointly", "predict", "question", "focuses", "and", "produce", "the", "question", ".", "based", "on", "this", "framework", ",", "we", "further", "use", "both", "exemplars", "and", "automatically", "generated", "templates", "to", "improve", "controllability", "and", "diversity", ".", "experiments", "on", "two", "newly", "collected", "large", "-", "scale", "datasets", "show", "that", "our", "model", "improves", "question", "quality", "over", "competitive", "comparisons", "based", "on", "automatic", "metrics", ".", "human", "judges", "also", "rate", "our", "model", "outputs", "highly", "in", "answerability", ",", "coverage", "of", "scope", ",", "and", "overall", "quality", ".", "finally", ",", "our", "model", "variants", "with", "templates", "can", "produce", "questions", "with", "enhanced", "controllability", "and", "diversity", "."]}, {"venue": "ACL", "title": "Aligned Dual Channel Graph Convolutional Network for Visual Question Answering", "abstract": "Visual question answering aims to answer the natural language question about a given image. Existing graph-based methods only focus on the relations between objects in an image and neglect the importance of the syntactic dependency relations between words in a question. 
To simultaneously capture the relations between objects in an image and the syntactic dependency relations between words in a question, we propose a novel dual channel graph convolutional network (DC-GCN) for better combining visual and textual advantages. The DC-GCN model consists of three parts: an I-GCN module to capture the relations between objects in an image, a Q-GCN module to capture the syntactic dependency relations between words in a question, and an attention alignment module to align image representations and question representations. Experimental results show that our model achieves comparable performance with the state-of-the-art approaches.", "doc_id": "5c5e418ea0e97cab01e6f6873bf5a140", "publication_year": 2020, "sentences": ["visual question answering aims to answer the natural language question about a given image .", "existing graph - based methods only focus on the relations between objects in an image and neglect the importance of the syntactic dependency relations between words in a question .", "to simultaneously capture the relations between objects in an image and the syntactic dependency relations between words in a question , we propose a novel dual channel graph convolutional network ( dc - gcn ) for better combining visual and textual advantages .", "the dc - gcn model consists of three parts : an i - gcn module to capture the relations between objects in an image , a q - gcn module to capture the syntactic dependency relations between words in a question , and an attention alignment module to align image representations and question representations .", "experimental results show that our model achieves comparable performance with the state - of - the - art approaches ."], "events": [{"event_type": "ITT", "arguments": [{"text": "visual question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "question", "answering"], "offsets": [0, 1, 2]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": 
[3]}}, {"event_type": "RWF", "arguments": [{"text": "existing graph - based methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "graph", "-", "based", "methods"], "offsets": [15, 16, 17, 18, 19]}, {"text": "only focus", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["only", "focus"], "offsets": [20, 21]}], "trigger": {"text": "only focus", "tokens": ["only", "focus"], "offsets": [20, 21]}}, {"event_type": "RWF", "arguments": [{"text": "existing graph - based methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "graph", "-", "based", "methods"], "offsets": [15, 16, 17, 18, 19]}, {"text": "neglect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["neglect"], "offsets": [31]}], "trigger": {"text": "neglect", "tokens": ["neglect"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "dual channel graph convolutional network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["dual", "channel", "graph", "convolutional", "network"], "offsets": [70, 71, 72, 73, 74]}, {"text": "combining", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["combining"], "offsets": [82]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [47]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "visual and textual advantages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["visual", "and", "textual", "advantages"], "offsets": [83, 84, 85, 86]}], "trigger": {"text": "combining", "tokens": ["combining"], "offsets": [82]}}, {"event_type": "PUR", "arguments": [{"text": "relations between objects in an image and the syntactic dependency relations between words in a question", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["relations", 
"between", "objects", "in", "an", "image", "and", "the", "syntactic", "dependency", "relations", "between", "words", "in", "a", "question"], "offsets": [49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [47]}}, {"event_type": "MDS", "arguments": [{"text": "i - gcn module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["i", "-", "gcn", "module"], "offsets": [99, 100, 101, 102]}, {"text": "relations between objects in an image", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["relations", "between", "objects", "in", "an", "image"], "offsets": [106, 107, 108, 109, 110, 111]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "q - gcn module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["q", "-", "gcn", "module"], "offsets": [114, 115, 116, 117]}, {"text": "syntactic dependency relations between words in a question", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["syntactic", "dependency", "relations", "between", "words", "in", "a", "question"], "offsets": [121, 122, 123, 124, 125, 126, 127, 128]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [119]}}, {"event_type": "MDS", "arguments": [{"text": "attention alignment module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["attention", "alignment", "module"], "offsets": [132, 133, 134]}, {"text": "image representations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["image", "representations"], "offsets": [137, 138]}, {"text": "question representations", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["question", "representations"], "offsets": [140, 141]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [136]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", 
"tokens": ["achieves"], "offsets": [149]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "dual channel graph convolutional network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["dual", "channel", "graph", "convolutional", "network"], "offsets": [70, 71, 72, 73, 74]}, {"text": "comparable performance with the state - of - the - art approaches", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["comparable", "performance", "with", "the", "state", "-", "of", "-", "the", "-", "art", "approaches"], "offsets": [150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [149]}}], "document": ["visual", "question", "answering", "aims", "to", "answer", "the", "natural", "language", "question", "about", "a", "given", "image", ".", "existing", "graph", "-", "based", "methods", "only", "focus", "on", "the", "relations", "between", "objects", "in", "an", "image", "and", "neglect", "the", "importance", "of", "the", "syntactic", "dependency", "relations", "between", "words", "in", "a", "question", ".", "to", "simultaneously", "capture", "the", "relations", "between", "objects", "in", "an", "image", "and", "the", "syntactic", "dependency", "relations", "between", "words", "in", "a", "question", ",", "we", "propose", "a", "novel", "dual", "channel", "graph", "convolutional", "network", "(", "dc", "-", "gcn", ")", "for", "better", "combining", "visual", "and", "textual", "advantages", ".", "the", "dc", "-", "gcn", "model", "consists", "of", "three", "parts", ":", "an", "i", "-", "gcn", "module", "to", "capture", "the", "relations", "between", "objects", "in", "an", "image", ",", "a", "q", "-", "gcn", "module", "to", "capture", "the", "syntactic", "dependency", "relations", "between", "words", "in", "a", "question", ",", "and", "an", "attention", "alignment", "module", "to", "align", "image", "representations", "and", 
"question", "representations", ".", "experimental", "results", "show", "that", "our", "model", "achieves", "comparable", "performance", "with", "the", "state", "-", "of", "-", "the", "-", "art", "approaches", "."]}, {"venue": "ACL", "title": "Quantifying Similarity between Relations with Fact Distribution", "abstract": "We introduce a conceptually simple and effective method to quantify the similarity between relations in knowledge bases. Specifically, our approach is based on the divergence between the conditional probability distributions over entity pairs. In this paper, these distributions are parameterized by a very simple neural network. Although computing the exact similarity is in-tractable, we provide a sampling-based method to get a good approximation. We empirically show the outputs of our approach significantly correlate with human judgments. By applying our method to various tasks, we also find that (1) our approach could effectively detect redundant relations extracted by open information extraction (Open IE) models, that (2) even the most competitive models for relational classification still make mistakes among very similar relations, and that (3) our approach could be incorporated into negative sampling and softmax classification to alleviate these mistakes.", "doc_id": "144cdf350ba47cf715a4afe6239e0def", "publication_year": 2019, "sentences": ["we introduce a conceptually simple and effective method to quantify the similarity between relations in knowledge bases .", "specifically , our approach is based on the divergence between the conditional probability distributions over entity pairs .", "in this paper , these distributions are parameterized by a very simple neural network .", "although computing the exact similarity is in - tractable , we provide a sampling - based method to get a good approximation .", "we empirically show the outputs of our approach significantly correlate with human judgments .", "by applying our method to various tasks , 
we also find that ( 1 ) our approach could effectively detect redundant relations extracted by open information extraction ( open ie ) models , that ( 2 ) even the most competitive models for relational classification still make mistakes among very similar relations , and that ( 3 ) our approach could be incorporated into negative sampling and softmax classification to alleviate these mistakes ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "conceptually simple and effective method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["conceptually", "simple", "and", "effective", "method"], "offsets": [3, 4, 5, 6, 7]}, {"text": "quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quantify"], "offsets": [9]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "similarity between relations in knowledge bases", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["similarity", "between", "relations", "in", "knowledge", "bases"], "offsets": [11, 12, 13, 14, 15, 16]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [9]}}, {"event_type": "WKS", "arguments": [{"text": "divergence between the conditional probability distributions over entity pairs", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["divergence", "between", "the", "conditional", "probability", "distributions", "over", "entity", "pairs"], "offsets": [26, 27, 28, 29, 30, 31, 32, 33, 34]}], "trigger": {"text": "based on", "tokens": ["based", "on"], "offsets": [23, 24]}}, {"event_type": "MDS", "arguments": [{"text": "simple neural network", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["simple", "neural", "network"], "offsets": [47, 48, 49]}, {"text": "conditional probability distributions", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["conditional", "probability", "distributions"], "offsets": [29, 30, 31]}], "trigger": {"text": "parameterized", "tokens": ["parameterized"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [61]}, {"text": "sampling - based method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["sampling", "-", "based", "method"], "offsets": [64, 65, 66, 67]}, {"text": "get", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["get"], "offsets": [69]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "good approximation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["good", "approximation"], "offsets": [71, 72]}], "trigger": {"text": "get", "tokens": ["get"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [74]}, {"text": "significantly correlate with", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significantly", "correlate", "with"], "offsets": [82, 83, 84]}], "trigger": {"text": "empirically show", "tokens": ["empirically", "show"], "offsets": [75, 76]}}, {"event_type": "FAC", "arguments": [{"text": "outputs of our approach", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["outputs", "of", "conceptually", "simple", "and", "effective", "method"], "offsets": [78, 79, 3, 4, 5, 6, 7]}, {"text": "human judgments", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["human", "judgments"], "offsets": [85, 86]}], "trigger": {"text": "significantly correlate with", "tokens": ["significantly", "correlate", "with"], "offsets": [82, 83, 84]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [96]}, {"text": "effectively detect", "nugget_type": 
"E-FAC", "argument_type": "Content", "tokens": ["effectively", "detect"], "offsets": [106, 107]}, {"text": "incorporated", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["incorporated"], "offsets": [150]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [98]}}, {"event_type": "FAC", "arguments": [{"text": "conceptually simple and effective method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["conceptually", "simple", "and", "effective", "method"], "offsets": [3, 4, 5, 6, 7]}, {"text": "redundant relations extracted by open information extraction ( open ie ) models", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["redundant", "relations", "extracted", "by", "open", "information", "extraction", "models"], "offsets": [108, 109, 110, 111, 112, 113, 114, 119]}], "trigger": {"text": "effectively detect", "tokens": ["effectively", "detect"], "offsets": [106, 107]}}, {"event_type": "FAC", "arguments": [{"text": "conceptually simple and effective method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["conceptually", "simple", "and", "effective", "method"], "offsets": [3, 4, 5, 6, 7]}, {"text": "negative sampling", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["negative", "sampling"], "offsets": [152, 153]}, {"text": "softmax classification", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["softmax", "classification"], "offsets": [155, 156]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [158]}], "trigger": {"text": "incorporated", "tokens": ["incorporated"], "offsets": [150]}}, {"event_type": "PUR", "arguments": [{"text": "mistakes", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["mistakes"], "offsets": [135]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [158]}}], "document": ["we", "introduce", "a", "conceptually", "simple", "and", "effective", "method", "to", "quantify", "the", 
"similarity", "between", "relations", "in", "knowledge", "bases", ".", "specifically", ",", "our", "approach", "is", "based", "on", "the", "divergence", "between", "the", "conditional", "probability", "distributions", "over", "entity", "pairs", ".", "in", "this", "paper", ",", "these", "distributions", "are", "parameterized", "by", "a", "very", "simple", "neural", "network", ".", "although", "computing", "the", "exact", "similarity", "is", "in", "-", "tractable", ",", "we", "provide", "a", "sampling", "-", "based", "method", "to", "get", "a", "good", "approximation", ".", "we", "empirically", "show", "the", "outputs", "of", "our", "approach", "significantly", "correlate", "with", "human", "judgments", ".", "by", "applying", "our", "method", "to", "various", "tasks", ",", "we", "also", "find", "that", "(", "1", ")", "our", "approach", "could", "effectively", "detect", "redundant", "relations", "extracted", "by", "open", "information", "extraction", "(", "open", "ie", ")", "models", ",", "that", "(", "2", ")", "even", "the", "most", "competitive", "models", "for", "relational", "classification", "still", "make", "mistakes", "among", "very", "similar", "relations", ",", "and", "that", "(", "3", ")", "our", "approach", "could", "be", "incorporated", "into", "negative", "sampling", "and", "softmax", "classification", "to", "alleviate", "these", "mistakes", "."]}, {"venue": "ACL", "title": "SkipBERT: Efficient Inference with Shallow Layer Skipping", "abstract": "In this paper, we propose SkipBERT to accelerate BERT inference by skipping the computation of shallow layers. To achieve this, our approach encodes small text chunks into independent representations, which are then materialized to approximate the shallow representation of BERT. Since the use of such approximation is inexpensive compared with transformer calculations, we leverage it to replace the shallow layers of BERT to skip their runtime overhead. 
With off-the-shelf early exit mechanisms, we also skip redundant computation from the highest few layers to further improve inference efficiency. Results on GLUE show that our approach can reduce latency by 65% without sacrificing performance. By using only two-layer transformer calculations, we can still maintain 95% accuracy of BERT.", "doc_id": "b3e3737fd8c075fa02b422c3cb5bcab8", "publication_year": 2022, "sentences": ["in this paper , we propose skipbert to accelerate bert inference by skipping the computation of shallow layers .", "to achieve this , our approach encodes small text chunks into independent representations , which are then materialized to approximate the shallow representation of bert .", "since the use of such approximation is inexpensive compared with transformer calculations , we leverage it to replace the shallow layers of bert to skip their runtime overhead .", "with off - the - shelf early exit mechanisms , we also skip redundant computation from the highest few layers to further improve inference efficiency .", "results on glue show that our approach can reduce latency by 65 % without sacrificing performance .", "by using only two - layer transformer calculations , we can still maintain 95 % accuracy of bert ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [4]}, {"text": "skipbert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["skipbert"], "offsets": [6]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [5]}}, {"event_type": "MDS", "arguments": [{"text": "computation of shallow layers", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["computation", "of", "shallow", "layers"], "offsets": [14, 15, 16, 17]}, {"text": "accelerate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["accelerate"], "offsets": [8]}], "trigger": {"text": "skipping", "tokens": ["skipping"], "offsets": 
[12]}}, {"event_type": "PUR", "arguments": [{"text": "bert inference", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["bert", "inference"], "offsets": [9, 10]}], "trigger": {"text": "accelerate", "tokens": ["accelerate"], "offsets": [8]}}, {"event_type": "MDS", "arguments": [{"text": "small text chunks", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["small", "text", "chunks"], "offsets": [26, 27, 28]}, {"text": "independent representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["independent", "representations"], "offsets": [30, 31]}], "trigger": {"text": "encodes", "tokens": ["encodes"], "offsets": [25]}}, {"event_type": "MDS", "arguments": [{"text": "independent representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["independent", "representations"], "offsets": [30, 31]}, {"text": "shallow representation of bert", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["shallow", "representation", "of", "bert"], "offsets": [40, 41, 42, 43]}], "trigger": {"text": "materialized", "tokens": ["materialized"], "offsets": [36]}}, {"event_type": "MDS", "arguments": [{"text": "such approximation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["such", "approximation"], "offsets": [49, 50]}, {"text": "shallow layers of bert", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["shallow", "layers", "of", "bert"], "offsets": [64, 65, 66, 67]}, {"text": "skip", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["skip"], "offsets": [69]}], "trigger": {"text": "replace", "tokens": ["replace"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "runtime overhead", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["runtime", "overhead"], "offsets": [71, 72]}], "trigger": {"text": "skip", "tokens": ["skip"], "offsets": [69]}}, {"event_type": "WKS", "arguments": [{"text": "with off - the - shelf early exit 
mechanisms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "off", "-", "the", "-", "shelf", "early", "exit", "mechanisms"], "offsets": [74, 75, 76, 77, 78, 79, 80, 81, 82]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [84]}, {"text": "redundant computation from the highest few layers", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["redundant", "computation", "from", "the", "highest", "few", "layers"], "offsets": [87, 88, 89, 90, 91, 92, 93]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [96]}], "trigger": {"text": "skip", "tokens": ["skip"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "inference efficiency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["inference", "efficiency"], "offsets": [97, 98]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [96]}}, {"event_type": "FAC", "arguments": [{"text": "latency", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["latency"], "offsets": [109]}, {"text": "65 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["65", "%"], "offsets": [111, 112]}, {"text": "without sacrificing performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "sacrificing", "performance"], "offsets": [113, 114, 115]}, {"text": "skipbert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["skipbert"], "offsets": [6]}, {"text": "on glue", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "glue"], "offsets": [101, 102]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": "95 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["95", "%"], "offsets": [130, 131]}, {"text": "accuracy of bert", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["accuracy", "of", "bert"], "offsets": [132, 
133, 134]}, {"text": "by using only two - layer transformer calculations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "using", "only", "two", "-", "layer", "transformer", "calculations"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124]}], "trigger": {"text": "maintain", "tokens": ["maintain"], "offsets": [129]}}], "document": ["in", "this", "paper", ",", "we", "propose", "skipbert", "to", "accelerate", "bert", "inference", "by", "skipping", "the", "computation", "of", "shallow", "layers", ".", "to", "achieve", "this", ",", "our", "approach", "encodes", "small", "text", "chunks", "into", "independent", "representations", ",", "which", "are", "then", "materialized", "to", "approximate", "the", "shallow", "representation", "of", "bert", ".", "since", "the", "use", "of", "such", "approximation", "is", "inexpensive", "compared", "with", "transformer", "calculations", ",", "we", "leverage", "it", "to", "replace", "the", "shallow", "layers", "of", "bert", "to", "skip", "their", "runtime", "overhead", ".", "with", "off", "-", "the", "-", "shelf", "early", "exit", "mechanisms", ",", "we", "also", "skip", "redundant", "computation", "from", "the", "highest", "few", "layers", "to", "further", "improve", "inference", "efficiency", ".", "results", "on", "glue", "show", "that", "our", "approach", "can", "reduce", "latency", "by", "65", "%", "without", "sacrificing", "performance", ".", "by", "using", "only", "two", "-", "layer", "transformer", "calculations", ",", "we", "can", "still", "maintain", "95", "%", "accuracy", "of", "bert", "."]}, {"venue": "ACL", "title": "Learning Spoken Language Representations with Neural Lattice Language Modeling", "abstract": "Pre-trained language models have achieved huge improvement on many NLP tasks. However, these methods are usually designed for written text, so they do not consider the properties of spoken language. 
Therefore, this paper aims at generalizing the idea of language model pre-training to lattices generated by recognition systems. We propose a framework that trains neural lattice language models to provide contextualized representations for spoken language understanding tasks. The proposed two-stage pre-training approach reduces the demands of speech data and has better efficiency. Experiments on intent detection and dialogue act recognition datasets demonstrate that our proposed method consistently outperforms strong baselines when evaluated on spoken inputs. The code is available at https://github.com/MiuLab/Lattice-ELMo.", "doc_id": "d3a28e02e31534f8acca3309218a3c7a", "publication_year": 2020, "sentences": ["pre - trained language models have achieved huge improvement on many nlp tasks .", "however , these methods are usually designed for written text , so they do not consider the properties of spoken language .", "therefore , this paper aims at generalizing the idea of language model pre - training to lattices generated by recognition systems .", "we propose a framework that trains neural lattice language models to provide contextualized representations for spoken language understanding tasks .", "the proposed two - stage pre - training approach reduces the demands of speech data and has better efficiency .", "experiments on intent detection and dialogue act recognition datasets demonstrate that our proposed method consistently outperforms strong baselines when evaluated on spoken inputs .", "the code is available at https : / / github . 
com / miulab / lattice - elmo ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained language models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["pre", "-", "trained", "language", "models"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "not consider", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "consider"], "offsets": [28, 29]}, {"text": "properties of spoken language", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["properties", "of", "spoken", "language"], "offsets": [31, 32, 33, 34]}], "trigger": {"text": "not consider", "tokens": ["not", "consider"], "offsets": [28, 29]}}, {"event_type": "WKS", "arguments": [{"text": "idea of language model pre - training", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["idea", "of", "language", "model", "pre", "-", "training"], "offsets": [44, 45, 46, 47, 48, 49, 50]}, {"text": "lattices", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["lattices"], "offsets": [52]}], "trigger": {"text": "generalizing", "tokens": ["generalizing"], "offsets": [42]}}, {"event_type": "MDS", "arguments": [{"text": "lattices", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["lattices"], "offsets": [52]}, {"text": "recognition systems", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["recognition", "systems"], "offsets": [55, 56]}], "trigger": {"text": "generated", "tokens": ["generated"], "offsets": [53]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework"], "offsets": [61]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "neural lattice language models", 
"nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "lattice", "language", "models"], "offsets": [64, 65, 66, 67]}, {"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [69]}, {"text": "spoken language understanding tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["spoken", "language", "understanding", "tasks"], "offsets": [73, 74, 75, 76]}], "trigger": {"text": "trains", "tokens": ["trains"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "contextualized representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["contextualized", "representations"], "offsets": [70, 71]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [69]}}, {"event_type": "FAC", "arguments": [{"text": "proposed two - stage pre - training approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["proposed", "two", "-", "stage", "pre", "-", "training", "approach"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86]}, {"text": "demands of speech data", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["demands", "of", "speech", "data"], "offsets": [89, 90, 91, 92]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [87]}}, {"event_type": "CMP", "arguments": [{"text": "proposed two - stage pre - training approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "two", "-", "stage", "pre", "-", "training", "approach"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [95]}, {"text": "efficiency", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["efficiency"], "offsets": [96]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [94]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": 
[113]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [107]}}, {"event_type": "CMP", "arguments": [{"text": "proposed two - stage pre - training approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["proposed", "two", "-", "stage", "pre", "-", "training", "approach"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86]}, {"text": "consistently", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["consistently"], "offsets": [112]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [113]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [114, 115]}, {"text": "when evaluated on spoken inputs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "evaluated", "on", "spoken", "inputs"], "offsets": [116, 117, 118, 119, 120]}, {"text": "intent detection datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["intent", "detection", "datasets"], "offsets": [100, 101, 106]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [113]}}], "document": ["pre", "-", "trained", "language", "models", "have", "achieved", "huge", "improvement", "on", "many", "nlp", "tasks", ".", "however", ",", "these", "methods", "are", "usually", "designed", "for", "written", "text", ",", "so", "they", "do", "not", "consider", "the", "properties", "of", "spoken", "language", ".", "therefore", ",", "this", "paper", "aims", "at", "generalizing", "the", "idea", "of", "language", "model", "pre", "-", "training", "to", "lattices", "generated", "by", "recognition", "systems", ".", "we", "propose", "a", "framework", "that", "trains", "neural", "lattice", "language", "models", "to", "provide", "contextualized", "representations", "for", "spoken", "language", "understanding", "tasks", ".", "the", "proposed", "two", "-", "stage", "pre", "-", "training", "approach", "reduces", 
"the", "demands", "of", "speech", "data", "and", "has", "better", "efficiency", ".", "experiments", "on", "intent", "detection", "and", "dialogue", "act", "recognition", "datasets", "demonstrate", "that", "our", "proposed", "method", "consistently", "outperforms", "strong", "baselines", "when", "evaluated", "on", "spoken", "inputs", ".", "the", "code", "is", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "miulab", "/", "lattice", "-", "elmo", "."]}, {"venue": "ACL", "title": "Don\u2019t Eclipse Your Arts Due to Small Discrepancies: Boundary Repositioning with a Pointer Network for Aspect Extraction", "abstract": "The current aspect extraction methods suffer from boundary errors. In general, these errors lead to a relatively minor difference between the extracted aspects and the ground-truth. However, they hurt the performance severely. In this paper, we propose to utilize a pointer network for repositioning the boundaries. Recycling mechanism is used, which enables the training data to be collected without manual intervention. We conduct the experiments on the benchmark datasets SE14 of laptop and SE14-16 of restaurant. 
Experimental results show that our method achieves substantial improvements over the baseline, and outperforms state-of-the-art methods.", "doc_id": "f635725fae2325b8b086bb7a8f11ed31", "publication_year": 2020, "sentences": ["the current aspect extraction methods suffer from boundary errors .", "in general , these errors lead to a relatively minor difference between the extracted aspects and the ground - truth .", "however , they hurt the performance severely .", "in this paper , we propose to utilize a pointer network for repositioning the boundaries .", "recycling mechanism is used , which enables the training data to be collected without manual intervention .", "we conduct the experiments on the benchmark datasets se14 of laptop and se14 - 16 of restaurant .", "experimental results show that our method achieves substantial improvements over the baseline , and outperforms state - of - the - art methods ."], "events": [{"event_type": "RWF", "arguments": [{"text": "current aspect extraction methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "aspect", "extraction", "methods"], "offsets": [1, 2, 3, 4]}, {"text": "suffer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer"], "offsets": [5]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "relatively minor difference between the extracted aspects and the ground - truth", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["relatively", "minor", "difference", "between", "the", "extracted", "aspects", "and", "the", "ground", "-", "truth"], "offsets": [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]}, {"text": "boundary errors", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["boundary", "errors"], "offsets": [7, 8]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [15]}}, {"event_type": "RWF", "arguments": [{"text": "hurt", "nugget_type": "WEA", "argument_type": "Fault", 
"tokens": ["hurt"], "offsets": [34]}, {"text": "boundary errors", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["boundary", "errors"], "offsets": [7, 8]}], "trigger": {"text": "hurt", "tokens": ["hurt"], "offsets": [34]}}, {"event_type": "MDS", "arguments": [{"text": "pointer network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pointer", "network"], "offsets": [48, 49]}, {"text": "repositioning", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["repositioning"], "offsets": [51]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "boundaries", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["boundaries"], "offsets": [53]}], "trigger": {"text": "repositioning", "tokens": ["repositioning"], "offsets": [51]}}, {"event_type": "MDS", "arguments": [{"text": "recycling mechanism", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["recycling", "mechanism"], "offsets": [55, 56]}, {"text": "collected", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collected"], "offsets": [67]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [58]}}, {"event_type": "PUR", "arguments": [{"text": "training data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["training", "data"], "offsets": [63, 64]}, {"text": "without manual intervention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "manual", "intervention"], "offsets": [68, 69, 70]}], "trigger": {"text": "collected", "tokens": ["collected"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [72]}, {"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [75]}, {"text": "se14", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["se14"], "offsets": [80]}, {"text": "se14 - 
16", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["se14", "-", "16"], "offsets": [84, 85, 86]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [73]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [104]}, {"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [96]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [92]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [104]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [105, 106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "substantial improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["substantial", "improvements"], "offsets": [97, 98]}, {"text": "over the baseline", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "the", "baseline"], "offsets": [99, 100, 101]}, {"text": "pointer network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pointer", "network"], "offsets": [48, 49]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [96]}}], "document": ["the", "current", "aspect", "extraction", "methods", "suffer", "from", "boundary", "errors", ".", "in", "general", ",", "these", "errors", "lead", "to", "a", "relatively", "minor", "difference", "between", "the", "extracted", "aspects", "and", "the", "ground", "-", "truth", ".", "however", ",", "they", "hurt", "the", "performance", "severely", ".", "in", "this", "paper", ",", "we", "propose", "to", "utilize", "a", "pointer", "network", "for", "repositioning", 
"the", "boundaries", ".", "recycling", "mechanism", "is", "used", ",", "which", "enables", "the", "training", "data", "to", "be", "collected", "without", "manual", "intervention", ".", "we", "conduct", "the", "experiments", "on", "the", "benchmark", "datasets", "se14", "of", "laptop", "and", "se14", "-", "16", "of", "restaurant", ".", "experimental", "results", "show", "that", "our", "method", "achieves", "substantial", "improvements", "over", "the", "baseline", ",", "and", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "."]}, {"venue": "ACL", "title": "Inferring Concept Hierarchies from Text Corpora via Hyperbolic Embeddings", "abstract": "We consider the task of inferring \u201cis-a\u201d relationships from large text corpora. For this purpose, we propose a new method combining hyperbolic embeddings and Hearst patterns. This approach allows us to set appropriate constraints for inferring concept hierarchies from distributional contexts while also being able to predict missing \u201cis-a\u201d-relationships and to correct wrong extractions. Moreover \u2013 and in contrast with other methods \u2013 the hierarchical nature of hyperbolic space allows us to learn highly efficient representations and to improve the taxonomic consistency of the inferred hierarchies. 
Experimentally, we show that our approach achieves state-of-the-art performance on several commonly-used benchmarks.", "doc_id": "588d7387ea7b85a91a5d804c06ba3845", "publication_year": 2019, "sentences": ["we consider the task of inferring \u201c is - a \u201d relationships from large text corpora .", "for this purpose , we propose a new method combining hyperbolic embeddings and hearst patterns .", "this approach allows us to set appropriate constraints for inferring concept hierarchies from distributional contexts while also being able to predict missing \u201c is - a \u201d - relationships and to correct wrong extractions .", "moreover \u2013 and in contrast with other methods \u2013 the hierarchical nature of hyperbolic space allows us to learn highly efficient representations and to improve the taxonomic consistency of the inferred hierarchies .", "experimentally , we show that our approach achieves state - of - the - art performance on several commonly - used benchmarks ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "task of inferring \u201c is - a \u201d relationships", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task", "of", "inferring", "\u201c", "is", "-", "a", "\u201d", "relationships"], "offsets": [3, 4, 5, 6, 7, 8, 9, 10, 11]}, {"text": "large text corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["large", "text", "corpora"], "offsets": [13, 14, 15]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "method combining hyperbolic embeddings and hearst patterns", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method", "combining", "hyperbolic", "embeddings", "and", "hearst", "patterns"], "offsets": [25, 26, 27, 28, 29, 
30, 31]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "inferring", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["inferring"], "offsets": [42]}], "trigger": {"text": "set", "tokens": ["set"], "offsets": [38]}}, {"event_type": "PUR", "arguments": [{"text": "concept hierarchies", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["concept", "hierarchies"], "offsets": [43, 44]}], "trigger": {"text": "inferring", "tokens": ["inferring"], "offsets": [42]}}, {"event_type": "MDS", "arguments": [{"text": "missing \u201c is - a \u201d - relationships", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["missing", "\u201c", "is", "-", "a", "\u201d", "-", "relationships"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [53]}}, {"event_type": "MDS", "arguments": [{"text": "wrong extractions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["wrong", "extractions"], "offsets": [65, 66]}], "trigger": {"text": "correct", "tokens": ["correct"], "offsets": [64]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchical nature of hyperbolic space", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["hierarchical", "nature", "of", "hyperbolic", "space"], "offsets": [78, 79, 80, 81, 82]}, {"text": "highly efficient representations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["highly", "efficient", "representations"], "offsets": [87, 88, 89]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [86]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchical nature of hyperbolic space", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["hierarchical", "nature", "of", "hyperbolic", "space"], "offsets": [78, 79, 80, 81, 82]}, {"text": "taxonomic consistency of the inferred hierarchies", "nugget_type": "FEA", "argument_type": "Object", 
"tokens": ["taxonomic", "consistency", "of", "the", "inferred", "hierarchies"], "offsets": [94, 95, 96, 97, 98, 99]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [92]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [103]}, {"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [108]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "performance", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["performance"], "offsets": [116]}, {"text": "on several commonly - used benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "commonly", "-", "used", "benchmarks"], "offsets": [117, 118, 119, 120, 121, 122]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [108]}}], "document": ["we", "consider", "the", "task", "of", "inferring", "\u201c", "is", "-", "a", "\u201d", "relationships", "from", "large", "text", "corpora", ".", "for", "this", "purpose", ",", "we", "propose", "a", "new", "method", "combining", "hyperbolic", "embeddings", "and", "hearst", "patterns", ".", "this", "approach", "allows", "us", "to", "set", "appropriate", "constraints", "for", "inferring", "concept", "hierarchies", "from", "distributional", "contexts", "while", "also", "being", "able", "to", "predict", "missing", "\u201c", "is", "-", "a", "\u201d", "-", "relationships", "and", "to", "correct", "wrong", "extractions", ".", "moreover", "\u2013", "and", "in", "contrast", "with", "other", "methods", "\u2013", "the", "hierarchical", "nature", "of", "hyperbolic", "space", "allows", "us", "to", "learn", "highly", "efficient", "representations", "and", "to", "improve", "the", "taxonomic", "consistency", "of", "the", "inferred", "hierarchies", ".", "experimentally", ",", "we", "show", "that", "our", "approach", "achieves", 
"state", "-", "of", "-", "the", "-", "art", "performance", "on", "several", "commonly", "-", "used", "benchmarks", "."]}, {"venue": "ACL", "title": "Joint Slot Filling and Intent Detection via Capsule Neural Networks", "abstract": "Being able to recognize words as slots and detect the intent of an utterance has been a keen issue in natural language understanding. The existing works either treat slot filling and intent detection separately in a pipeline manner, or adopt joint models which sequentially label slots while summarizing the utterance-level intent without explicitly preserving the hierarchical relationship among words, slots, and intents. To exploit the semantic hierarchy for effective modeling, we propose a capsule-based neural network model which accomplishes slot filling and intent detection via a dynamic routing-by-agreement schema. A re-routing schema is proposed to further synergize the slot filling performance using the inferred intent representation. Experiments on two real-world datasets show the effectiveness of our model when compared with other alternative model architectures, as well as existing natural language understanding services.", "doc_id": "60762654fbfa8ecc0e80d9acd0bd66ba", "publication_year": 2019, "sentences": ["being able to recognize words as slots and detect the intent of an utterance has been a keen issue in natural language understanding .", "the existing works either treat slot filling and intent detection separately in a pipeline manner , or adopt joint models which sequentially label slots while summarizing the utterance - level intent without explicitly preserving the hierarchical relationship among words , slots , and intents .", "to exploit the semantic hierarchy for effective modeling , we propose a capsule - based neural network model which accomplishes slot filling and intent detection via a dynamic routing - by - agreement schema .", "a re - routing schema is proposed to further synergize the slot filling performance 
using the inferred intent representation .", "experiments on two real - world datasets show the effectiveness of our model when compared with other alternative model architectures , as well as existing natural language understanding services ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "understanding"], "offsets": [20, 21, 22]}], "trigger": {"text": "issue", "tokens": ["issue"], "offsets": [18]}}, {"event_type": "RWF", "arguments": [{"text": "without explicitly preserving", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["without", "explicitly", "preserving"], "offsets": [55, 56, 57]}], "trigger": {"text": "without explicitly preserving", "tokens": ["without", "explicitly", "preserving"], "offsets": [55, 56, 57]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [78]}, {"text": "capsule - based neural network model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["capsule", "-", "based", "neural", "network", "model"], "offsets": [81, 82, 83, 84, 85, 86]}, {"text": "exploit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["exploit"], "offsets": [70]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "semantic hierarchy", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["semantic", "hierarchy"], "offsets": [72, 73]}, {"text": "for effective modeling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "effective", "modeling"], "offsets": [74, 75, 76]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [70]}}, {"event_type": "PRP", "arguments": [{"text": "re - routing schema", "nugget_type": "APP", "argument_type": "Content", "tokens": ["re", "-", "routing", "schema"], "offsets": [105, 106, 107, 108]}], 
"trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [110]}}, {"event_type": "WKS", "arguments": [{"text": "inferred intent representation", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["inferred", "intent", "representation"], "offsets": [120, 121, 122]}, {"text": "synergize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["synergize"], "offsets": [113]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [118]}}, {"event_type": "CMP", "arguments": [{"text": "alternative model architectures", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["alternative", "model", "architectures"], "offsets": [141, 142, 143]}, {"text": "existing natural language understanding services", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "natural", "language", "understanding", "services"], "offsets": [148, 149, 150, 151, 152]}, {"text": "capsule - based neural network model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["capsule", "-", "based", "neural", "network", "model"], "offsets": [81, 82, 83, 84, 85, 86]}, {"text": "two real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "real", "-", "world", "datasets"], "offsets": [126, 127, 128, 129, 130]}, {"text": "effectiveness of our model", "nugget_type": "STR", "argument_type": "Result", "tokens": ["effectiveness", "of", "our", "model"], "offsets": [133, 134, 135, 136]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [131]}}, {"event_type": "RWS", "arguments": [{"text": "slot filling and intent detection", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["slot", "filling", "and", "intent", "detection"], "offsets": [29, 30, 31, 32, 33]}, {"text": "pipeline manner", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["pipeline", "manner"], "offsets": [37, 38]}], "trigger": {"text": "treat", "tokens": ["treat"], "offsets": [28]}}, {"event_type": "RWS", 
"arguments": [{"text": "joint models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["joint", "models"], "offsets": [42, 43]}, {"text": "while summarizing the utterance - level intent", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "summarizing", "the", "utterance", "-", "level", "intent"], "offsets": [48, 49, 50, 51, 52, 53, 54]}, {"text": "slots", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["slots"], "offsets": [47]}], "trigger": {"text": "sequentially label", "tokens": ["sequentially", "label"], "offsets": [45, 46]}}, {"event_type": "MDS", "arguments": [{"text": "dynamic routing - by - agreement schema", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["dynamic", "routing", "-", "by", "-", "agreement", "schema"], "offsets": [96, 97, 98, 99, 100, 101, 102]}, {"text": "slot filling", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["slot", "filling"], "offsets": [89, 90]}, {"text": "intent detection", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["intent", "detection"], "offsets": [92, 93]}], "trigger": {"text": "accomplishes", "tokens": ["accomplishes"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "slot filling performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["slot", "filling", "performance"], "offsets": [115, 116, 117]}], "trigger": {"text": "synergize", "tokens": ["synergize"], "offsets": [113]}}], "document": ["being", "able", "to", "recognize", "words", "as", "slots", "and", "detect", "the", "intent", "of", "an", "utterance", "has", "been", "a", "keen", "issue", "in", "natural", "language", "understanding", ".", "the", "existing", "works", "either", "treat", "slot", "filling", "and", "intent", "detection", "separately", "in", "a", "pipeline", "manner", ",", "or", "adopt", "joint", "models", "which", "sequentially", "label", "slots", "while", "summarizing", "the", "utterance", "-", "level", 
"intent", "without", "explicitly", "preserving", "the", "hierarchical", "relationship", "among", "words", ",", "slots", ",", "and", "intents", ".", "to", "exploit", "the", "semantic", "hierarchy", "for", "effective", "modeling", ",", "we", "propose", "a", "capsule", "-", "based", "neural", "network", "model", "which", "accomplishes", "slot", "filling", "and", "intent", "detection", "via", "a", "dynamic", "routing", "-", "by", "-", "agreement", "schema", ".", "a", "re", "-", "routing", "schema", "is", "proposed", "to", "further", "synergize", "the", "slot", "filling", "performance", "using", "the", "inferred", "intent", "representation", ".", "experiments", "on", "two", "real", "-", "world", "datasets", "show", "the", "effectiveness", "of", "our", "model", "when", "compared", "with", "other", "alternative", "model", "architectures", ",", "as", "well", "as", "existing", "natural", "language", "understanding", "services", "."]}, {"venue": "ACL", "title": "A Diverse Corpus for Evaluating and Developing English Math Word Problem Solvers", "abstract": "We present ASDiv (Academia Sinica Diverse MWP Dataset), a diverse (in terms of both language patterns and problem types) English math word problem (MWP) corpus for evaluating the capability of various MWP solvers. Existing MWP corpora for studying AI progress remain limited either in language usage patterns or in problem types. We thus present a new English MWP corpus with 2,305 MWPs that cover more text patterns and most problem types taught in elementary school. Each MWP is annotated with its problem type and grade level (for indicating the level of difficulty). Furthermore, we propose a metric to measure the lexicon usage diversity of a given MWP corpus, and demonstrate that ASDiv is more diverse than existing corpora. 
Experiments show that our proposed corpus reflects the true capability of MWP solvers more faithfully.", "doc_id": "5635d1edac086ff1b585a140b01b683b", "publication_year": 2020, "sentences": ["we present asdiv ( academia sinica diverse mwp dataset ) , a diverse ( in terms of both language patterns and problem types ) english math word problem ( mwp ) corpus for evaluating the capability of various mwp solvers .", "existing mwp corpora for studying ai progress remain limited either in language usage patterns or in problem types .", "we thus present a new english mwp corpus with 2 , 305 mwps that cover more text patterns and most problem types taught in elementary school .", "each mwp is annotated with its problem type and grade level ( for indicating the level of difficulty ) .", "furthermore , we propose a metric to measure the lexicon usage diversity of a given mwp corpus , and demonstrate that asdiv is more diverse than existing corpora .", "experiments show that our proposed corpus reflects the true capability of mwp solvers more faithfully ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "asdiv", "nugget_type": "DST", "argument_type": "Content", "tokens": ["asdiv"], "offsets": [2]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [33]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "capability of various mwp solvers", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["capability", "of", "various", "mwp", "solvers"], "offsets": [35, 36, 37, 38, 39]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [33]}}, {"event_type": "RWF", "arguments": [{"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [49]}, {"text": "language usage patterns", 
"nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "usage", "patterns"], "offsets": [52, 53, 54]}, {"text": "problem types", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["problem", "types"], "offsets": [57, 58]}, {"text": "existing mwp corpora", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["existing", "mwp", "corpora"], "offsets": [41, 42, 43]}], "trigger": {"text": "remain", "tokens": ["remain"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [60]}, {"text": "english mwp corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["english", "mwp", "corpus"], "offsets": [65, 66, 67]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [62]}}, {"event_type": "WKS", "arguments": [{"text": "more text patterns", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["more", "text", "patterns"], "offsets": [75, 76, 77]}, {"text": "most problem types", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["most", "problem", "types"], "offsets": [79, 80, 81]}, {"text": "taught in elementary school", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["taught", "in", "elementary", "school"], "offsets": [82, 83, 84, 85]}], "trigger": {"text": "cover", "tokens": ["cover"], "offsets": [74]}}, {"event_type": "WKS", "arguments": [{"text": "problem type", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "type"], "offsets": [93, 94]}, {"text": "grade level", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["grade", "level"], "offsets": [96, 97]}], "trigger": {"text": "annotated", "tokens": ["annotated"], "offsets": [90]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}, {"text": "metric", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["metric"], "offsets": [112]}, {"text": "measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measure"], "offsets": [114]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "lexicon usage diversity of a given mwp corpus", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["lexicon", "usage", "diversity", "of", "a", "given", "mwp", "corpus"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [114]}}, {"event_type": "FIN", "arguments": [{"text": "diverse", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["diverse"], "offsets": [131]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [126]}}, {"event_type": "CMP", "arguments": [{"text": "asdiv", "nugget_type": "DST", "argument_type": "Arg1", "tokens": ["asdiv"], "offsets": [128]}, {"text": "more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more"], "offsets": [130]}, {"text": "existing corpora", "nugget_type": "DST", "argument_type": "Arg2", "tokens": ["existing", "corpora"], "offsets": [133, 134]}], "trigger": {"text": "diverse", "tokens": ["diverse"], "offsets": [131]}}, {"event_type": "FIN", "arguments": [{"text": "reflects", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["reflects"], "offsets": [142]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "english mwp corpus", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["english", "mwp", "corpus"], "offsets": [65, 66, 67]}, {"text": "true capability of mwp solvers", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["true", "capability", "of", "mwp", "solvers"], "offsets": [144, 145, 146, 147, 148]}, {"text": "more faithfully", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more", "faithfully"], "offsets": [149, 150]}], "trigger": 
{"text": "reflects", "tokens": ["reflects"], "offsets": [142]}}], "document": ["we", "present", "asdiv", "(", "academia", "sinica", "diverse", "mwp", "dataset", ")", ",", "a", "diverse", "(", "in", "terms", "of", "both", "language", "patterns", "and", "problem", "types", ")", "english", "math", "word", "problem", "(", "mwp", ")", "corpus", "for", "evaluating", "the", "capability", "of", "various", "mwp", "solvers", ".", "existing", "mwp", "corpora", "for", "studying", "ai", "progress", "remain", "limited", "either", "in", "language", "usage", "patterns", "or", "in", "problem", "types", ".", "we", "thus", "present", "a", "new", "english", "mwp", "corpus", "with", "2", ",", "305", "mwps", "that", "cover", "more", "text", "patterns", "and", "most", "problem", "types", "taught", "in", "elementary", "school", ".", "each", "mwp", "is", "annotated", "with", "its", "problem", "type", "and", "grade", "level", "(", "for", "indicating", "the", "level", "of", "difficulty", ")", ".", "furthermore", ",", "we", "propose", "a", "metric", "to", "measure", "the", "lexicon", "usage", "diversity", "of", "a", "given", "mwp", "corpus", ",", "and", "demonstrate", "that", "asdiv", "is", "more", "diverse", "than", "existing", "corpora", ".", "experiments", "show", "that", "our", "proposed", "corpus", "reflects", "the", "true", "capability", "of", "mwp", "solvers", "more", "faithfully", "."]}, {"venue": "ACL", "title": "Named Entity Recognition with Small Strongly Labeled and Large Weakly Labeled Data", "abstract": "Weak supervision has shown promising results in many natural language processing tasks, such as Named Entity Recognition (NER). Existing work mainly focuses on learning deep NER models only with weak supervision, i.e., without any human annotation, and shows that by merely using weakly labeled data, one can achieve good performance, though still underperforms fully supervised NER with manually/strongly labeled data. 
In this paper, we consider a more practical scenario, where we have both a small amount of strongly labeled data and a large amount of weakly labeled data. Unfortunately, we observe that weakly labeled data does not necessarily improve, or even deteriorate the model performance (due to the extensive noise in the weak labels) when we train deep NER models over a simple or weighted combination of the strongly labeled and weakly labeled data. To address this issue, we propose a new multi-stage computational framework \u2013 NEEDLE with three essential ingredients: (1) weak label completion, (2) noise-aware loss function, and (3) final fine-tuning over the strongly labeled data. Through experiments on E-commerce query NER and Biomedical NER, we demonstrate that NEEDLE can effectively suppress the noise of the weak labels and outperforms existing methods. In particular, we achieve new SOTA F1-scores on 3 Biomedical NER datasets: BC5CDR-chem 93.74, BC5CDR-disease 90.69, NCBI-disease 92.28.", "doc_id": "9bdb2fefd21555a39bf495a4a894856a", "publication_year": 2021, "sentences": ["weak supervision has shown promising results in many natural language processing tasks , such as named entity recognition ( ner ) .", "existing work mainly focuses on learning deep ner models only with weak supervision , i . e . 
, without any human annotation , and shows that by merely using weakly labeled data , one can achieve good performance , though still underperforms fully supervised ner with manually / strongly labeled data .", "in this paper , we consider a more practical scenario , where we have both a small amount of strongly labeled data and a large amount of weakly labeled data .", "unfortunately , we observe that weakly labeled data does not necessarily improve , or even deteriorate the model performance ( due to the extensive noise in the weak labels ) when we train deep ner models over a simple or weighted combination of the strongly labeled and weakly labeled data .", "to address this issue , we propose a new multi - stage computational framework \u2013 needle with three essential ingredients : ( 1 ) weak label completion , ( 2 ) noise - aware loss function , and ( 3 ) final fine - tuning over the strongly labeled data .", "through experiments on e - commerce query ner and biomedical ner , we demonstrate that needle can effectively suppress the noise of the weak labels and outperforms existing methods .", "in particular , we achieve new sota f1 - scores on 3 biomedical ner datasets : bc5cdr - chem 93 . 74 , bc5cdr - disease 90 . 69 , ncbi - disease 92 . 
28 ."], "events": [{"event_type": "RWS", "arguments": [{"text": "existing work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "work"], "offsets": [22, 23]}, {"text": "deep ner models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["deep", "ner", "models"], "offsets": [28, 29, 30]}, {"text": "with weak supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "weak", "supervision"], "offsets": [32, 33, 34]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [27]}}, {"event_type": "PRP", "arguments": [{"text": "practical scenario", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["practical", "scenario"], "offsets": [83, 84]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [80]}}, {"event_type": "CMP", "arguments": [{"text": "weakly labeled data", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["weakly", "labeled", "data"], "offsets": [111, 112, 113]}, {"text": "model performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["model", "performance"], "offsets": [123, 124]}, {"text": "when we train deep ner models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "we", "train", "deep", "ner", "models"], "offsets": [136, 137, 138, 139, 140, 141]}, {"text": "simple or weighted combination of the strongly labeled and weakly labeled data", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["simple", "or", "weighted", "combination", "of", "the", "strongly", "labeled", "and", "weakly", "labeled", "data"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "deteriorate", "tokens": ["deteriorate"], "offsets": [121]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [162]}, {"text": "multi - stage computational framework", "nugget_type": "APP", "argument_type": "Content", 
"tokens": ["multi", "-", "stage", "computational", "framework"], "offsets": [166, 167, 168, 169, 170]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [163]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [220]}, {"text": "effectively suppress", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effectively", "suppress"], "offsets": [225, 226]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [234]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [221]}}, {"event_type": "FAC", "arguments": [{"text": "noise of the weak labels", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["noise", "of", "the", "weak", "labels"], "offsets": [228, 229, 230, 231, 232]}, {"text": "multi - stage computational framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "stage", "computational", "framework"], "offsets": [166, 167, 168, 169, 170]}], "trigger": {"text": "effectively suppress", "tokens": ["effectively", "suppress"], "offsets": [225, 226]}}, {"event_type": "CMP", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "methods"], "offsets": [235, 236]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [234]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [234]}}, {"event_type": "RWS", "arguments": [{"text": "existing work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "work"], "offsets": [22, 23]}, {"text": "weakly labeled data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["weakly", "labeled", "data"], "offsets": [52, 53, 54]}, {"text": "good performance", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["good", 
"performance"], "offsets": [59, 60]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [47]}}, {"event_type": "CMP", "arguments": [{"text": "existing work", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["existing", "work"], "offsets": [22, 23]}, {"text": "fully supervised ner with manually / strongly labeled data", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["fully", "supervised", "ner", "with", "manually", "/", "strongly", "labeled", "data"], "offsets": [65, 66, 67, 68, 69, 70, 71, 72, 73]}], "trigger": {"text": "underperforms", "tokens": ["underperforms"], "offsets": [64]}}, {"event_type": "ITT", "arguments": [{"text": "weak supervision", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["weak", "supervision"], "offsets": [0, 1]}, {"text": "in many natural language processing tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "many", "natural", "language", "processing", "tasks"], "offsets": [6, 7, 8, 9, 10, 11]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [3]}}, {"event_type": "FAC", "arguments": [{"text": "bc5cdr - chem", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["bc5cdr", "-", "chem"], "offsets": [254, 255, 256]}, {"text": "93 . 
74", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["93", ".", "74"], "offsets": [257, 258, 259]}, {"text": "sota f1 - scores", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sota", "f1", "-", "scores"], "offsets": [244, 245, 246, 247]}, {"text": "multi - stage computational framework \u2013 needle", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "stage", "computational", "framework", "\u2013", "needle"], "offsets": [166, 167, 168, 169, 170, 171, 172]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [242]}}, {"event_type": "FAC", "arguments": [{"text": "bc5cdr - disease", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["bc5cdr", "-", "disease"], "offsets": [261, 262, 263]}, {"text": "90 . 69", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["90", ".", "69"], "offsets": [264, 265, 266]}, {"text": "multi - stage computational framework \u2013 needle", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "stage", "computational", "framework", "\u2013", "needle"], "offsets": [166, 167, 168, 169, 170, 171, 172]}, {"text": "sota f1 - scores", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sota", "f1", "-", "scores"], "offsets": [244, 245, 246, 247]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [242]}}, {"event_type": "FAC", "arguments": [{"text": "ncbi - disease", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ncbi", "-", "disease"], "offsets": [268, 269, 270]}, {"text": "92 . 
28", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["92", ".", "28"], "offsets": [271, 272, 273]}, {"text": "multi - stage computational framework \u2013 needle", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "stage", "computational", "framework", "\u2013", "needle"], "offsets": [166, 167, 168, 169, 170, 171, 172]}, {"text": "sota f1 - scores", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sota", "f1", "-", "scores"], "offsets": [244, 245, 246, 247]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [242]}}], "document": ["weak", "supervision", "has", "shown", "promising", "results", "in", "many", "natural", "language", "processing", "tasks", ",", "such", "as", "named", "entity", "recognition", "(", "ner", ")", ".", "existing", "work", "mainly", "focuses", "on", "learning", "deep", "ner", "models", "only", "with", "weak", "supervision", ",", "i", ".", "e", ".", ",", "without", "any", "human", "annotation", ",", "and", "shows", "that", "by", "merely", "using", "weakly", "labeled", "data", ",", "one", "can", "achieve", "good", "performance", ",", "though", "still", "underperforms", "fully", "supervised", "ner", "with", "manually", "/", "strongly", "labeled", "data", ".", "in", "this", "paper", ",", "we", "consider", "a", "more", "practical", "scenario", ",", "where", "we", "have", "both", "a", "small", "amount", "of", "strongly", "labeled", "data", "and", "a", "large", "amount", "of", "weakly", "labeled", "data", ".", "unfortunately", ",", "we", "observe", "that", "weakly", "labeled", "data", "does", "not", "necessarily", "improve", ",", "or", "even", "deteriorate", "the", "model", "performance", "(", "due", "to", "the", "extensive", "noise", "in", "the", "weak", "labels", ")", "when", "we", "train", "deep", "ner", "models", "over", "a", "simple", "or", "weighted", "combination", "of", "the", "strongly", "labeled", "and", "weakly", "labeled", "data", ".", "to", "address", "this", "issue", ",", 
"we", "propose", "a", "new", "multi", "-", "stage", "computational", "framework", "\u2013", "needle", "with", "three", "essential", "ingredients", ":", "(", "1", ")", "weak", "label", "completion", ",", "(", "2", ")", "noise", "-", "aware", "loss", "function", ",", "and", "(", "3", ")", "final", "fine", "-", "tuning", "over", "the", "strongly", "labeled", "data", ".", "through", "experiments", "on", "e", "-", "commerce", "query", "ner", "and", "biomedical", "ner", ",", "we", "demonstrate", "that", "needle", "can", "effectively", "suppress", "the", "noise", "of", "the", "weak", "labels", "and", "outperforms", "existing", "methods", ".", "in", "particular", ",", "we", "achieve", "new", "sota", "f1", "-", "scores", "on", "3", "biomedical", "ner", "datasets", ":", "bc5cdr", "-", "chem", "93", ".", "74", ",", "bc5cdr", "-", "disease", "90", ".", "69", ",", "ncbi", "-", "disease", "92", ".", "28", "."]}, {"venue": "ACL", "title": "Quality Controlled Paraphrase Generation", "abstract": "Paraphrase generation has been widely used in various downstream tasks. Most tasks benefit mainly from high quality paraphrases, namely those that are semantically similar to, yet linguistically diverse from, the original sentence. Generating high-quality paraphrases is challenging as it becomes increasingly hard to preserve meaning as linguistic diversity increases. Recent works achieve nice results by controlling specific aspects of the paraphrase, such as its syntactic tree. However, they do not allow to directly control the quality of the generated paraphrase, and suffer from low flexibility and scalability. Here we propose QCPG, a quality-guided controlled paraphrase generation model, that allows directly controlling the quality dimensions. Furthermore, we suggest a method that given a sentence, identifies points in the quality control space that are expected to yield optimal generated paraphrases. 
We show that our method is able to generate paraphrases which maintain the original meaning while achieving higher diversity than the uncontrolled baseline. The models, the code, and the data can be found in https://github.com/IBM/quality-controlled-paraphrase-generation.", "doc_id": "d77344ef1d354cea57f1fedc42f0ecdf", "publication_year": 2022, "sentences": ["paraphrase generation has been widely used in various downstream tasks .", "most tasks benefit mainly from high quality paraphrases , namely those that are semantically similar to , yet linguistically diverse from , the original sentence .", "generating high - quality paraphrases is challenging as it becomes increasingly hard to preserve meaning as linguistic diversity increases .", "recent works achieve nice results by controlling specific aspects of the paraphrase , such as its syntactic tree .", "however , they do not allow to directly control the quality of the generated paraphrase , and suffer from low flexibility and scalability .", "here we propose qcpg , a quality - guided controlled paraphrase generation model , that allows directly controlling the quality dimensions .", "furthermore , we suggest a method that given a sentence , identifies points in the quality control space that are expected to yield optimal generated paraphrases .", "we show that our method is able to generate paraphrases which maintain the original meaning while achieving higher diversity than the uncontrolled baseline .", "the models , the code , and the data can be found in https : / / github . 
com / ibm / quality - controlled - paraphrase - generation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "paraphrase generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["paraphrase", "generation"], "offsets": [0, 1]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "do not allow", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["do", "not", "allow"], "offsets": [79, 80, 81]}, {"text": "directly control", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["directly", "control"], "offsets": [83, 84]}], "trigger": {"text": "do not allow", "tokens": ["do", "not", "allow"], "offsets": [79, 80, 81]}}, {"event_type": "PUR", "arguments": [{"text": "quality of the generated paraphrase", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["quality", "of", "the", "generated", "paraphrase"], "offsets": [86, 87, 88, 89, 90]}], "trigger": {"text": "directly control", "tokens": ["directly", "control"], "offsets": [83, 84]}}, {"event_type": "RWF", "arguments": [{"text": "suffer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer"], "offsets": [93]}, {"text": "specific aspects of the paraphrase", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["specific", "aspects", "of", "the", "paraphrase"], "offsets": [64, 65, 66, 67, 68]}, {"text": "low flexibility", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["low", "flexibility"], "offsets": [95, 96]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [93]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [101]}, {"text": "quality - guided controlled paraphrase generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["quality", "-", "guided", "controlled", "paraphrase", "generation", "model"], "offsets": [106, 107, 108, 109, 110, 111, 112]}, {"text": 
"directly controlling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["directly", "controlling"], "offsets": [116, 117]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [102]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [124]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [127]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [125]}}, {"event_type": "MDS", "arguments": [{"text": "given a sentence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["given", "a", "sentence"], "offsets": [129, 130, 131]}, {"text": "yield", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["yield"], "offsets": [144]}, {"text": "points", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["points"], "offsets": [134]}, {"text": "quality control space", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["quality", "control", "space"], "offsets": [137, 138, 139]}], "trigger": {"text": "identifies", "tokens": ["identifies"], "offsets": [133]}}, {"event_type": "PUR", "arguments": [{"text": "optimal generated paraphrases", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["optimal", "generated", "paraphrases"], "offsets": [145, 146, 147]}], "trigger": {"text": "yield", "tokens": ["yield"], "offsets": [144]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [149]}, {"text": "generate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["generate"], "offsets": [157]}, {"text": "achieving", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieving"], "offsets": [165]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [150]}}, {"event_type": "FAC", "arguments": [{"text": "paraphrases which maintain the original meaning", 
"nugget_type": "TAK", "argument_type": "Object", "tokens": ["paraphrases", "which", "maintain", "the", "original", "meaning"], "offsets": [158, 159, 160, 161, 162, 163]}, {"text": "quality - guided controlled paraphrase generation model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quality", "-", "guided", "controlled", "paraphrase", "generation", "model"], "offsets": [106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [157]}}, {"event_type": "CMP", "arguments": [{"text": "higher diversity", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher", "diversity"], "offsets": [166, 167]}, {"text": "uncontrolled baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["uncontrolled", "baseline"], "offsets": [170, 171]}, {"text": "quality - guided controlled paraphrase generation model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["quality", "-", "guided", "controlled", "paraphrase", "generation", "model"], "offsets": [106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [165]}}, {"event_type": "PUR", "arguments": [{"text": "quality dimensions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["quality", "dimensions"], "offsets": [119, 120]}], "trigger": {"text": "directly controlling", "tokens": ["directly", "controlling"], "offsets": [116, 117]}}], "document": ["paraphrase", "generation", "has", "been", "widely", "used", "in", "various", "downstream", "tasks", ".", "most", "tasks", "benefit", "mainly", "from", "high", "quality", "paraphrases", ",", "namely", "those", "that", "are", "semantically", "similar", "to", ",", "yet", "linguistically", "diverse", "from", ",", "the", "original", "sentence", ".", "generating", "high", "-", "quality", "paraphrases", "is", "challenging", "as", "it", "becomes", "increasingly", "hard", "to", "preserve", "meaning", "as", "linguistic", "diversity", "increases", ".", 
"recent", "works", "achieve", "nice", "results", "by", "controlling", "specific", "aspects", "of", "the", "paraphrase", ",", "such", "as", "its", "syntactic", "tree", ".", "however", ",", "they", "do", "not", "allow", "to", "directly", "control", "the", "quality", "of", "the", "generated", "paraphrase", ",", "and", "suffer", "from", "low", "flexibility", "and", "scalability", ".", "here", "we", "propose", "qcpg", ",", "a", "quality", "-", "guided", "controlled", "paraphrase", "generation", "model", ",", "that", "allows", "directly", "controlling", "the", "quality", "dimensions", ".", "furthermore", ",", "we", "suggest", "a", "method", "that", "given", "a", "sentence", ",", "identifies", "points", "in", "the", "quality", "control", "space", "that", "are", "expected", "to", "yield", "optimal", "generated", "paraphrases", ".", "we", "show", "that", "our", "method", "is", "able", "to", "generate", "paraphrases", "which", "maintain", "the", "original", "meaning", "while", "achieving", "higher", "diversity", "than", "the", "uncontrolled", "baseline", ".", "the", "models", ",", "the", "code", ",", "and", "the", "data", "can", "be", "found", "in", "https", ":", "/", "/", "github", ".", "com", "/", "ibm", "/", "quality", "-", "controlled", "-", "paraphrase", "-", "generation", "."]}, {"venue": "ACL", "title": "Are Shortest Rationales the Best Explanations for Human Understanding?", "abstract": "Existing self-explaining models typically favor extracting the shortest possible rationales \u2014 snippets of an input text \u201cresponsible for\u201d corresponding output \u2014 to explain the model prediction, with the assumption that shorter rationales are more intuitive to humans. However, this assumption has yet to be validated. Is the shortest rationale indeed the most human-understandable? To answer this question, we design a self-explaining model, LimitedInk, which allows users to extract rationales at any target length. 
Compared to existing baselines, LimitedInk achieves compatible end-task performance and human-annotated rationale agreement, making it a suitable representation of the recent class of self-explaining models. We use LimitedInk to conduct a user study on the impact of rationale length, where we ask human judges to predict the sentiment label of documents based only on LimitedInk-generated rationales with different lengths. We show rationales that are too short do not help humans predict labels better than randomly masked text, suggesting the need for more careful design of the best human rationales.", "doc_id": "1524996a0edaeb5a0661d773297097de", "publication_year": 2022, "sentences": ["existing self - explaining models typically favor extracting the shortest possible rationales \u2014 snippets of an input text \u201c responsible for \u201d corresponding output \u2014 to explain the model prediction , with the assumption that shorter rationales are more intuitive to humans .", "however , this assumption has yet to be validated .", "is the shortest rationale indeed the most human - understandable ?", "to answer this question , we design a self - explaining model , limitedink , which allows users to extract rationales at any target length .", "compared to existing baselines , limitedink achieves compatible end - task performance and human - annotated rationale agreement , making it a suitable representation of the recent class of self - explaining models .", "we use limitedink to conduct a user study on the impact of rationale length , where we ask human judges to predict the sentiment label of documents based only on limitedink - generated rationales with different lengths .", "we show rationales that are too short do not help humans predict labels better than randomly masked text , suggesting the need for more careful design of the best human rationales ."], "events": [{"event_type": "RWS", "arguments": [{"text": "existing self - explaining models", "nugget_type": 
"APP", "argument_type": "Subject", "tokens": ["existing", "self", "-", "explaining", "models"], "offsets": [0, 1, 2, 3, 4]}, {"text": "shortest possible rationales", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["shortest", "possible", "rationales"], "offsets": [9, 10, 11]}, {"text": "model prediction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["model", "prediction"], "offsets": [28, 29]}, {"text": "with the assumption that shorter rationales are more intuitive to humans", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "assumption", "that", "shorter", "rationales", "are", "more", "intuitive", "to", "humans"], "offsets": [31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41]}], "trigger": {"text": "explain", "tokens": ["explain"], "offsets": [26]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [69]}, {"text": "self - explaining model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["self", "-", "explaining", "model"], "offsets": [72, 73, 74, 75]}, {"text": "extract", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["extract"], "offsets": [83]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [70]}}, {"event_type": "CMP", "arguments": [{"text": "existing baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "baselines"], "offsets": [92, 93]}, {"text": "limitedink", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["limitedink"], "offsets": [95]}, {"text": "compatible end - task performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["compatible", "end", "-", "task", "performance"], "offsets": [97, 98, 99, 100, 101]}, {"text": "human - annotated rationale agreement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["human", "-", "annotated", "rationale", "agreement"], "offsets": [103, 104, 105, 106, 107]}], 
"trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [96]}}, {"event_type": "FAC", "arguments": [{"text": "limitedink", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["limitedink"], "offsets": [95]}, {"text": "suitable representation of the recent class of self - explaining models", "nugget_type": "STR", "argument_type": "Object", "tokens": ["suitable", "representation", "of", "the", "recent", "class", "of", "self", "-", "explaining", "models"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [109]}}, {"event_type": "MDS", "arguments": [{"text": "sentiment label of documents", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["sentiment", "label", "of", "documents"], "offsets": [147, 148, 149, 150]}, {"text": "limitedink - generated rationales", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["limitedink", "-", "generated", "rationales"], "offsets": [154, 155, 156, 157]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [145]}}, {"event_type": "PUR", "arguments": [{"text": "rationales at any target length", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["rationales", "at", "any", "target", "length"], "offsets": [84, 85, 86, 87, 88]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [83]}}, {"event_type": "PUR", "arguments": [{"text": "user study", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["user", "study"], "offsets": [130, 131]}, {"text": "on the impact of rationale length", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "impact", "of", "rationale", "length"], "offsets": [132, 133, 134, 135, 136, 137]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [128]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [162]}, {"text": "not help", 
"nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["not", "help"], "offsets": [170, 171]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [163]}}, {"event_type": "CMP", "arguments": [{"text": "randomly masked text", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["randomly", "masked", "text"], "offsets": [177, 178, 179]}, {"text": "humans predict labels", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["humans", "predict", "labels"], "offsets": [172, 173, 174]}], "trigger": {"text": "not help", "tokens": ["not", "help"], "offsets": [170, 171]}}], "document": ["existing", "self", "-", "explaining", "models", "typically", "favor", "extracting", "the", "shortest", "possible", "rationales", "\u2014", "snippets", "of", "an", "input", "text", "\u201c", "responsible", "for", "\u201d", "corresponding", "output", "\u2014", "to", "explain", "the", "model", "prediction", ",", "with", "the", "assumption", "that", "shorter", "rationales", "are", "more", "intuitive", "to", "humans", ".", "however", ",", "this", "assumption", "has", "yet", "to", "be", "validated", ".", "is", "the", "shortest", "rationale", "indeed", "the", "most", "human", "-", "understandable", "?", "to", "answer", "this", "question", ",", "we", "design", "a", "self", "-", "explaining", "model", ",", "limitedink", ",", "which", "allows", "users", "to", "extract", "rationales", "at", "any", "target", "length", ".", "compared", "to", "existing", "baselines", ",", "limitedink", "achieves", "compatible", "end", "-", "task", "performance", "and", "human", "-", "annotated", "rationale", "agreement", ",", "making", "it", "a", "suitable", "representation", "of", "the", "recent", "class", "of", "self", "-", "explaining", "models", ".", "we", "use", "limitedink", "to", "conduct", "a", "user", "study", "on", "the", "impact", "of", "rationale", "length", ",", "where", "we", "ask", "human", "judges", "to", "predict", "the", "sentiment", "label", "of", "documents", "based", "only", 
"on", "limitedink", "-", "generated", "rationales", "with", "different", "lengths", ".", "we", "show", "rationales", "that", "are", "too", "short", "do", "not", "help", "humans", "predict", "labels", "better", "than", "randomly", "masked", "text", ",", "suggesting", "the", "need", "for", "more", "careful", "design", "of", "the", "best", "human", "rationales", "."]}, {"venue": "ACL", "title": "Measure and Evaluation of Semantic Divergence across Two Languages", "abstract": "Languages are dynamic systems: word usage may change over time, reflecting various societal factors. However, all languages do not evolve identically: the impact of an event, the influence of a trend or thinking, can differ between communities. In this paper, we propose to track these divergences by comparing the evolution of a word and its translation across two languages. We investigate several methods of building time-varying and bilingual word embeddings, using contextualised and non-contextualised embeddings. We propose a set of scenarios to characterize semantic divergence across two languages, along with a setup to differentiate them in a bilingual corpus. We evaluate the different methods by generating a corpus of synthetic semantic change across two languages, English and French, before applying them to newspaper corpora to detect bilingual semantic divergence and provide qualitative insight for the task. 
We conclude that BERT embeddings coupled with a clustering step lead to the best performance on synthetic corpora; however, the performance of CBOW embeddings is very competitive and more adapted to an exploratory analysis on a large corpus.", "doc_id": "4693f5a6ef2a724bf8eb00cb5d66e192", "publication_year": 2021, "sentences": ["languages are dynamic systems : word usage may change over time , reflecting various societal factors .", "however , all languages do not evolve identically : the impact of an event , the influence of a trend or thinking , can differ between communities .", "in this paper , we propose to track these divergences by comparing the evolution of a word and its translation across two languages .", "we investigate several methods of building time - varying and bilingual word embeddings , using contextualised and non - contextualised embeddings .", "we propose a set of scenarios to characterize semantic divergence across two languages , along with a setup to differentiate them in a bilingual corpus .", "we evaluate the different methods by generating a corpus of synthetic semantic change across two languages , english and french , before applying them to newspaper corpora to detect bilingual semantic divergence and provide qualitative insight for the task .", "we conclude that bert embeddings coupled with a clustering step lead to the best performance on synthetic corpora ; however , the performance of cbow embeddings is very competitive and more adapted to an exploratory analysis on a large corpus ."], "events": [{"event_type": "MDS", "arguments": [{"text": "divergences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["divergences"], "offsets": [54]}, {"text": "evolution of a word", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["evolution", "of", "a", "word"], "offsets": [58, 59, 60, 61]}, {"text": "translation across two languages", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": 
["translation", "across", "two", "languages"], "offsets": [64, 65, 66, 67]}], "trigger": {"text": "track", "tokens": ["track"], "offsets": [52]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [69]}, {"text": "methods of building time - varying and bilingual word embeddings", "nugget_type": "APP", "argument_type": "Content", "tokens": ["methods", "of", "building", "time", "-", "varying", "and", "bilingual", "word", "embeddings"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79, 80, 81]}, {"text": "using contextualised and non - contextualised embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "contextualised", "and", "non", "-", "contextualised", "embeddings"], "offsets": [83, 84, 85, 86, 87, 88, 89]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [70]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [91]}, {"text": "scenarios", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["scenarios"], "offsets": [96]}, {"text": "characterize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["characterize"], "offsets": [98]}, {"text": "setup", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["setup"], "offsets": [108]}, {"text": "differentiate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["differentiate"], "offsets": [110]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "semantic divergence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["semantic", "divergence"], "offsets": [99, 100]}, {"text": "across two languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "two", "languages"], "offsets": [101, 102, 103]}], "trigger": {"text": "characterize", "tokens": ["characterize"], "offsets": 
[98]}}, {"event_type": "PUR", "arguments": [{"text": "bilingual corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["bilingual", "corpus"], "offsets": [114, 115]}, {"text": "scenarios", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["scenarios"], "offsets": [96]}], "trigger": {"text": "differentiate", "tokens": ["differentiate"], "offsets": [110]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [117]}, {"text": "corpus of synthetic semantic change", "nugget_type": "DST", "argument_type": "Content", "tokens": ["corpus", "of", "synthetic", "semantic", "change"], "offsets": [125, 126, 127, 128, 129]}, {"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [118]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [123]}}, {"event_type": "PUR", "arguments": [{"text": "different methods", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["different", "methods"], "offsets": [120, 121]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [118]}}, {"event_type": "MDS", "arguments": [{"text": "different methods", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["different", "methods"], "offsets": [120, 121]}, {"text": "newspaper corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["newspaper", "corpora"], "offsets": [142, 143]}, {"text": "detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["detect"], "offsets": [145]}, {"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [150]}], "trigger": {"text": "applying", "tokens": ["applying"], "offsets": [139]}}, {"event_type": "PUR", "arguments": [{"text": "bilingual semantic divergence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["bilingual", "semantic", "divergence"], "offsets": [146, 147, 148]}], 
"trigger": {"text": "detect", "tokens": ["detect"], "offsets": [145]}}, {"event_type": "PUR", "arguments": [{"text": "qualitative insight", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["qualitative", "insight"], "offsets": [151, 152]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [150]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [157]}, {"text": "lead", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["lead"], "offsets": [167]}], "trigger": {"text": "conclude", "tokens": ["conclude"], "offsets": [158]}}, {"event_type": "FAC", "arguments": [{"text": "bert embeddings", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bert", "embeddings"], "offsets": [160, 161]}, {"text": "clustering step", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["clustering", "step"], "offsets": [165, 166]}, {"text": "synthetic corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetic", "corpora"], "offsets": [173, 174]}, {"text": "best performance", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["best", "performance"], "offsets": [170, 171]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [167]}}, {"event_type": "FAC", "arguments": [{"text": "very", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["very"], "offsets": [184]}, {"text": "cbow embeddings", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["cbow", "embeddings"], "offsets": [181, 182]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [179]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [185]}}, {"event_type": "CMP", "arguments": [{"text": "exploratory analysis", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["exploratory", "analysis"], "offsets": [191, 192]}, {"text": "large corpus", "nugget_type": 
"DST", "argument_type": "Dataset", "tokens": ["large", "corpus"], "offsets": [195, 196]}, {"text": "cbow embeddings", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cbow", "embeddings"], "offsets": [181, 182]}], "trigger": {"text": "more adapted", "tokens": ["more", "adapted"], "offsets": [187, 188]}}], "document": ["languages", "are", "dynamic", "systems", ":", "word", "usage", "may", "change", "over", "time", ",", "reflecting", "various", "societal", "factors", ".", "however", ",", "all", "languages", "do", "not", "evolve", "identically", ":", "the", "impact", "of", "an", "event", ",", "the", "influence", "of", "a", "trend", "or", "thinking", ",", "can", "differ", "between", "communities", ".", "in", "this", "paper", ",", "we", "propose", "to", "track", "these", "divergences", "by", "comparing", "the", "evolution", "of", "a", "word", "and", "its", "translation", "across", "two", "languages", ".", "we", "investigate", "several", "methods", "of", "building", "time", "-", "varying", "and", "bilingual", "word", "embeddings", ",", "using", "contextualised", "and", "non", "-", "contextualised", "embeddings", ".", "we", "propose", "a", "set", "of", "scenarios", "to", "characterize", "semantic", "divergence", "across", "two", "languages", ",", "along", "with", "a", "setup", "to", "differentiate", "them", "in", "a", "bilingual", "corpus", ".", "we", "evaluate", "the", "different", "methods", "by", "generating", "a", "corpus", "of", "synthetic", "semantic", "change", "across", "two", "languages", ",", "english", "and", "french", ",", "before", "applying", "them", "to", "newspaper", "corpora", "to", "detect", "bilingual", "semantic", "divergence", "and", "provide", "qualitative", "insight", "for", "the", "task", ".", "we", "conclude", "that", "bert", "embeddings", "coupled", "with", "a", "clustering", "step", "lead", "to", "the", "best", "performance", "on", "synthetic", "corpora", ";", "however", ",", "the", "performance", "of", "cbow", "embeddings", "is", 
"very", "competitive", "and", "more", "adapted", "to", "an", "exploratory", "analysis", "on", "a", "large", "corpus", "."]}, {"venue": "ACL", "title": "A Re-evaluation of Knowledge Graph Completion Methods", "abstract": "Knowledge Graph Completion (KGC) aims at automatically predicting missing links for large-scale knowledge graphs. A vast number of state-of-the-art KGC techniques have got published at top conferences in several research fields, including data mining, machine learning, and natural language processing. However, we notice that several recent papers report very high performance, which largely outperforms previous state-of-the-art methods. In this paper, we find that this can be attributed to the inappropriate evaluation protocol used by them and propose a simple evaluation protocol to address this problem. The proposed protocol is robust to handle bias in the model, which can substantially affect the final results. We conduct extensive experiments and report performance of several existing methods using our protocol. 
The reproducible code has been made publicly available.", "doc_id": "4f4dff1c325cf73f453be937fcd97ac0", "publication_year": 2020, "sentences": ["knowledge graph completion ( kgc ) aims at automatically predicting missing links for large - scale knowledge graphs .", "a vast number of state - of - the - art kgc techniques have got published at top conferences in several research fields , including data mining , machine learning , and natural language processing .", "however , we notice that several recent papers report very high performance , which largely outperforms previous state - of - the - art methods .", "in this paper , we find that this can be attributed to the inappropriate evaluation protocol used by them and propose a simple evaluation protocol to address this problem .", "the proposed protocol is robust to handle bias in the model , which can substantially affect the final results .", "we conduct extensive experiments and report performance of several existing methods using our protocol .", "the reproducible code has been made publicly available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "knowledge graph completion", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["kgc"], "offsets": [30]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [6]}}, {"event_type": "CMP", "arguments": [{"text": "largely", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["largely"], "offsets": [69]}, {"text": "previous state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79]}, {"text": "high performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["high", "performance"], "offsets": [65, 66]}, {"text": "state - of - the - art kgc techniques", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["state", "-", "of", "-", "the", "-", "art", "kgc", "techniques"], 
"offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [85]}, {"text": "used", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["used"], "offsets": [97]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [86]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [85]}, {"text": "simple evaluation protocol", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["simple", "evaluation", "protocol"], "offsets": [103, 104, 105]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [101]}}, {"event_type": "FAC", "arguments": [{"text": "inappropriate evaluation protocol", "nugget_type": "APP", "argument_type": "Object", "tokens": ["inappropriate", "evaluation", "protocol"], "offsets": [94, 95, 96]}, {"text": "previous state - of - the - art methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [97]}}, {"event_type": "FAC", "arguments": [{"text": "simple evaluation protocol", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["simple", "evaluation", "protocol"], "offsets": [103, 104, 105]}, {"text": "handle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["handle"], "offsets": [117]}], "trigger": {"text": "robust", "tokens": ["robust"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "in the model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "model"], "offsets": [119, 120, 121]}, {"text": "bias", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["bias"], "offsets": [118]}], 
"trigger": {"text": "handle", "tokens": ["handle"], "offsets": [117]}}], "document": ["knowledge", "graph", "completion", "(", "kgc", ")", "aims", "at", "automatically", "predicting", "missing", "links", "for", "large", "-", "scale", "knowledge", "graphs", ".", "a", "vast", "number", "of", "state", "-", "of", "-", "the", "-", "art", "kgc", "techniques", "have", "got", "published", "at", "top", "conferences", "in", "several", "research", "fields", ",", "including", "data", "mining", ",", "machine", "learning", ",", "and", "natural", "language", "processing", ".", "however", ",", "we", "notice", "that", "several", "recent", "papers", "report", "very", "high", "performance", ",", "which", "largely", "outperforms", "previous", "state", "-", "of", "-", "the", "-", "art", "methods", ".", "in", "this", "paper", ",", "we", "find", "that", "this", "can", "be", "attributed", "to", "the", "inappropriate", "evaluation", "protocol", "used", "by", "them", "and", "propose", "a", "simple", "evaluation", "protocol", "to", "address", "this", "problem", ".", "the", "proposed", "protocol", "is", "robust", "to", "handle", "bias", "in", "the", "model", ",", "which", "can", "substantially", "affect", "the", "final", "results", ".", "we", "conduct", "extensive", "experiments", "and", "report", "performance", "of", "several", "existing", "methods", "using", "our", "protocol", ".", "the", "reproducible", "code", "has", "been", "made", "publicly", "available", "."]}, {"venue": "ACL", "title": "Joint Verification and Reranking for Open Fact Checking Over Tables", "abstract": "Structured information is an important knowledge source for automatic verification of factual claims. Nevertheless, the majority of existing research into this task has focused on textual data, and the few recent inquiries into structured data have been for the closed-domain setting where appropriate evidence for each claim is assumed to have already been retrieved. 
In this paper, we investigate verification over structured data in the open-domain setting, introducing a joint reranking-and-verification model which fuses evidence documents in the verification component. Our open-domain model achieves performance comparable to the closed-domain state-of-the-art on the TabFact dataset, and demonstrates performance gains from the inclusion of multiple tables as well as a significant improvement over a heuristic retrieval baseline.", "doc_id": "d9129e5290e783123b3ac0581fccefed", "publication_year": 2021, "sentences": ["structured information is an important knowledge source for automatic verification of factual claims .", "nevertheless , the majority of existing research into this task has focused on textual data , and the few recent inquiries into structured data have been for the closed - domain setting where appropriate evidence for each claim is assumed to have already been retrieved .", "in this paper , we investigate verification over structured data in the open - domain setting , introducing a joint reranking - and - verification model which fuses evidence documents in the verification component .", "our open - domain model achieves performance comparable to the closed - domain state - of - the - art on the tabfact dataset , and demonstrates performance gains from the inclusion of multiple tables as well as a significant improvement over a heuristic retrieval baseline ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatic verification of factual claims", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "verification", "of", "factual", "claims"], "offsets": [8, 9, 10, 11, 12]}], "trigger": {"text": "knowledge source", "tokens": ["knowledge", "source"], "offsets": [5, 6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [64]}, {"text": "joint reranking - and - verification model", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["joint", "reranking", "-", "and", "-", "verification", "model"], "offsets": [79, 80, 81, 82, 83, 84, 85]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [77]}}, {"event_type": "CMP", "arguments": [{"text": "closed - domain state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["closed", "-", "domain", "state", "-", "of", "-", "the", "-", "art"], "offsets": [105, 106, 107, 108, 109, 110, 111, 112, 113, 114]}, {"text": "tabfact dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["tabfact", "dataset"], "offsets": [117, 118]}, {"text": "joint reranking - and - verification model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["joint", "reranking", "-", "and", "-", "verification", "model"], "offsets": [79, 80, 81, 82, 83, 84, 85]}, {"text": "performance comparable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["performance", "comparable"], "offsets": [101, 102]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [100]}}, {"event_type": "CMP", "arguments": [{"text": "joint reranking - and - verification model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["joint", "reranking", "-", "and", "-", "verification", "model"], "offsets": [79, 80, 81, 82, 83, 84, 85]}, {"text": "heuristic retrieval baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["heuristic", "retrieval", "baseline"], "offsets": [138, 139, 140]}, {"text": "significant improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "improvement"], "offsets": [134, 135]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "joint reranking - and - verification model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["joint", "reranking", "-", "and", "-", "verification", "model"], "offsets": [79, 80, 81, 82, 83, 
84, 85]}, {"text": "performance gains", "nugget_type": "STR", "argument_type": "Object", "tokens": ["performance", "gains"], "offsets": [122, 123]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [64]}, {"text": "verification over structured data", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["verification", "over", "structured", "data"], "offsets": [66, 67, 68, 69]}, {"text": "in the open - domain setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "open", "-", "domain", "setting"], "offsets": [70, 71, 72, 73, 74, 75]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [65]}}], "document": ["structured", "information", "is", "an", "important", "knowledge", "source", "for", "automatic", "verification", "of", "factual", "claims", ".", "nevertheless", ",", "the", "majority", "of", "existing", "research", "into", "this", "task", "has", "focused", "on", "textual", "data", ",", "and", "the", "few", "recent", "inquiries", "into", "structured", "data", "have", "been", "for", "the", "closed", "-", "domain", "setting", "where", "appropriate", "evidence", "for", "each", "claim", "is", "assumed", "to", "have", "already", "been", "retrieved", ".", "in", "this", "paper", ",", "we", "investigate", "verification", "over", "structured", "data", "in", "the", "open", "-", "domain", "setting", ",", "introducing", "a", "joint", "reranking", "-", "and", "-", "verification", "model", "which", "fuses", "evidence", "documents", "in", "the", "verification", "component", ".", "our", "open", "-", "domain", "model", "achieves", "performance", "comparable", "to", "the", "closed", "-", "domain", "state", "-", "of", "-", "the", "-", "art", "on", "the", "tabfact", "dataset", ",", "and", "demonstrates", "performance", "gains", "from", "the", "inclusion", 
"of", "multiple", "tables", "as", "well", "as", "a", "significant", "improvement", "over", "a", "heuristic", "retrieval", "baseline", "."]}, {"venue": "ACL", "title": "MixText: Linguistically-Informed Interpolation of Hidden Space for Semi-Supervised Text Classification", "abstract": "This paper presents MixText, a semi-supervised learning method for text classification, which uses our newly designed data augmentation method called TMix. TMix creates a large amount of augmented training samples by interpolating text in hidden space. Moreover, we leverage recent advances in data augmentation to guess low-entropy labels for unlabeled data, hence making them as easy to use as labeled data. By mixing labeled, unlabeled and augmented data, MixText significantly outperformed current pre-trained and fined-tuned models and other state-of-the-art semi-supervised learning methods on several text classification benchmarks. The improvement is especially prominent when supervision is extremely limited. We have publicly released our code at https://github.com/GT-SALT/MixText.", "doc_id": "2da194f0a0467502eb155511460ba2aa", "publication_year": 2020, "sentences": ["this paper presents mixtext , a semi - supervised learning method for text classification , which uses our newly designed data augmentation method called tmix .", "tmix creates a large amount of augmented training samples by interpolating text in hidden space .", "moreover , we leverage recent advances in data augmentation to guess low - entropy labels for unlabeled data , hence making them as easy to use as labeled data .", "by mixing labeled , unlabeled and augmented data , mixtext significantly outperformed current pre - trained and fined - tuned models and other state - of - the - art semi - supervised learning methods on several text classification benchmarks .", "the improvement is especially prominent when supervision is extremely limited .", "we have publicly released our code at https : / / github . 
com / gt - salt / mixtext ."], "events": [{"event_type": "PRP", "arguments": [{"text": "mixtext", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixtext"], "offsets": [3]}, {"text": "text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "classification"], "offsets": [12, 13]}], "trigger": {"text": "presents", "tokens": ["presents"], "offsets": [2]}}, {"event_type": "MDS", "arguments": [{"text": "newly designed data augmentation method", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["newly", "designed", "data", "augmentation", "method"], "offsets": [18, 19, 20, 21, 22]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [16]}}, {"event_type": "MDS", "arguments": [{"text": "text in hidden space", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["text", "in", "hidden", "space"], "offsets": [37, 38, 39, 40]}, {"text": "creates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["creates"], "offsets": [27]}], "trigger": {"text": "interpolating", "tokens": ["interpolating"], "offsets": [36]}}, {"event_type": "PUR", "arguments": [{"text": "large amount of augmented training samples", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["large", "amount", "of", "augmented", "training", "samples"], "offsets": [29, 30, 31, 32, 33, 34]}], "trigger": {"text": "creates", "tokens": ["creates"], "offsets": [27]}}, {"event_type": "MDS", "arguments": [{"text": "recent advances in data augmentation", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["recent", "advances", "in", "data", "augmentation"], "offsets": [46, 47, 48, 49, 50]}, {"text": "guess", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guess"], "offsets": [52]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "low - entropy labels for unlabeled data", "nugget_type": "FEA", "argument_type": "Aim", 
"tokens": ["low", "-", "entropy", "labels", "for", "unlabeled", "data"], "offsets": [53, 54, 55, 56, 57, 58, 59]}], "trigger": {"text": "guess", "tokens": ["guess"], "offsets": [52]}}, {"event_type": "CMP", "arguments": [{"text": "mixtext", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["mixtext"], "offsets": [81]}, {"text": "outperformed", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperformed"], "offsets": [83]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [82]}, {"text": "state - of - the - art semi - supervised learning methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "semi", "-", "supervised", "learning", "methods"], "offsets": [95, 96, 97, 98, 99, 100, 101, 102, 103, 104, 105, 106]}, {"text": "on several text classification benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "text", "classification", "benchmarks"], "offsets": [107, 108, 109, 110, 111]}, {"text": "by mixing labeled , unlabeled and augmented data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "mixing", "labeled", ",", "unlabeled", "and", "augmented", "data"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79]}, {"text": "current pre - trained models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "pre", "-", "trained", "models"], "offsets": [84, 85, 86, 87, 92]}], "trigger": {"text": "outperformed", "tokens": ["outperformed"], "offsets": [83]}}], "document": ["this", "paper", "presents", "mixtext", ",", "a", "semi", "-", "supervised", "learning", "method", "for", "text", "classification", ",", "which", "uses", "our", "newly", "designed", "data", "augmentation", "method", "called", "tmix", ".", "tmix", "creates", "a", "large", "amount", "of", "augmented", "training", "samples", "by", "interpolating", "text", "in", "hidden", "space", ".", "moreover", ",", 
"we", "leverage", "recent", "advances", "in", "data", "augmentation", "to", "guess", "low", "-", "entropy", "labels", "for", "unlabeled", "data", ",", "hence", "making", "them", "as", "easy", "to", "use", "as", "labeled", "data", ".", "by", "mixing", "labeled", ",", "unlabeled", "and", "augmented", "data", ",", "mixtext", "significantly", "outperformed", "current", "pre", "-", "trained", "and", "fined", "-", "tuned", "models", "and", "other", "state", "-", "of", "-", "the", "-", "art", "semi", "-", "supervised", "learning", "methods", "on", "several", "text", "classification", "benchmarks", ".", "the", "improvement", "is", "especially", "prominent", "when", "supervision", "is", "extremely", "limited", ".", "we", "have", "publicly", "released", "our", "code", "at", "https", ":", "/", "/", "github", ".", "com", "/", "gt", "-", "salt", "/", "mixtext", "."]}, {"venue": "ACL", "title": "Not All Claims are Created Equal: Choosing the Right Statistical Approach to Assess Hypotheses", "abstract": "Empirical research in Natural Language Processing (NLP) has adopted a narrow set of principles for assessing hypotheses, relying mainly on p-value computation, which suffers from several known issues. While alternative proposals have been well-debated and adopted in other fields, they remain rarely discussed or used within the NLP community. We address this gap by contrasting various hypothesis assessment techniques, especially those not commonly used in the field (such as evaluations based on Bayesian inference). Since these statistical techniques differ in the hypotheses they can support, we argue that practitioners should first decide their target hypothesis before choosing an assessment method. This is crucial because common fallacies, misconceptions, and misinterpretation surrounding hypothesis assessment methods often stem from a discrepancy between what one would like to claim versus what the method used actually assesses. 
Our survey reveals that these issues are omnipresent in the NLP research community. As a step forward, we provide best practices and guidelines tailored to NLP research, as well as an easy-to-use package for Bayesian assessment of hypotheses, complementing existing tools.", "doc_id": "4cd8bd44660b723bdddc54fc2919041f", "publication_year": 2020, "sentences": ["empirical research in natural language processing ( nlp ) has adopted a narrow set of principles for assessing hypotheses , relying mainly on p - value computation , which suffers from several known issues .", "while alternative proposals have been well - debated and adopted in other fields , they remain rarely discussed or used within the nlp community .", "we address this gap by contrasting various hypothesis assessment techniques , especially those not commonly used in the field ( such as evaluations based on bayesian inference ) .", "since these statistical techniques differ in the hypotheses they can support , we argue that practitioners should first decide their target hypothesis before choosing an assessment method .", "this is crucial because common fallacies , misconceptions , and misinterpretation surrounding hypothesis assessment methods often stem from a discrepancy between what one would like to claim versus what the method used actually assesses .", "our survey reveals that these issues are omnipresent in the nlp research community .", "as a step forward , we provide best practices and guidelines tailored to nlp research , as well as an easy - to - use package for bayesian assessment of hypotheses , complementing existing tools ."], "events": [{"event_type": "ITT", "arguments": [{"text": "empirical research in natural language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["empirical", "research", "in", "natural", "language", "processing"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "adopted", "tokens": ["adopted"], "offsets": [10]}}, {"event_type": "MDS", 
"arguments": [{"text": "various hypothesis assessment techniques", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["various", "hypothesis", "assessment", "techniques"], "offsets": [66, 67, 68, 69]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [61]}], "trigger": {"text": "contrasting", "tokens": ["contrasting"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "gap", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["gap"], "offsets": [63]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [61]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [171]}, {"text": "nlp research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "research"], "offsets": [179, 180]}, {"text": "best practices", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["best", "practices"], "offsets": [173, 174]}, {"text": "best guidelines", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["best", "guidelines"], "offsets": [173, 176]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [172]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [171]}, {"text": "easy - to - use package", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["easy", "-", "to", "-", "use", "package"], "offsets": [186, 187, 188, 189, 190, 191]}, {"text": "complementing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["complementing"], "offsets": [198]}, {"text": "bayesian assessment of hypotheses", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bayesian", "assessment", "of", "hypotheses"], "offsets": [193, 194, 195, 196]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [172]}}, {"event_type": "PUR", "arguments": [{"text": "existing 
tools", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["existing", "tools"], "offsets": [199, 200]}], "trigger": {"text": "complementing", "tokens": ["complementing"], "offsets": [198]}}], "document": ["empirical", "research", "in", "natural", "language", "processing", "(", "nlp", ")", "has", "adopted", "a", "narrow", "set", "of", "principles", "for", "assessing", "hypotheses", ",", "relying", "mainly", "on", "p", "-", "value", "computation", ",", "which", "suffers", "from", "several", "known", "issues", ".", "while", "alternative", "proposals", "have", "been", "well", "-", "debated", "and", "adopted", "in", "other", "fields", ",", "they", "remain", "rarely", "discussed", "or", "used", "within", "the", "nlp", "community", ".", "we", "address", "this", "gap", "by", "contrasting", "various", "hypothesis", "assessment", "techniques", ",", "especially", "those", "not", "commonly", "used", "in", "the", "field", "(", "such", "as", "evaluations", "based", "on", "bayesian", "inference", ")", ".", "since", "these", "statistical", "techniques", "differ", "in", "the", "hypotheses", "they", "can", "support", ",", "we", "argue", "that", "practitioners", "should", "first", "decide", "their", "target", "hypothesis", "before", "choosing", "an", "assessment", "method", ".", "this", "is", "crucial", "because", "common", "fallacies", ",", "misconceptions", ",", "and", "misinterpretation", "surrounding", "hypothesis", "assessment", "methods", "often", "stem", "from", "a", "discrepancy", "between", "what", "one", "would", "like", "to", "claim", "versus", "what", "the", "method", "used", "actually", "assesses", ".", "our", "survey", "reveals", "that", "these", "issues", "are", "omnipresent", "in", "the", "nlp", "research", "community", ".", "as", "a", "step", "forward", ",", "we", "provide", "best", "practices", "and", "guidelines", "tailored", "to", "nlp", "research", ",", "as", "well", "as", "an", "easy", "-", "to", "-", "use", "package", "for", "bayesian", "assessment", 
"of", "hypotheses", ",", "complementing", "existing", "tools", "."]}, {"venue": "ACL", "title": "Structural Information Preserving for Graph-to-Text Generation", "abstract": "The task of graph-to-text generation aims at producing sentences that preserve the meaning of input graphs. As a crucial defect, the current state-of-the-art models may mess up or even drop the core structural information of input graphs when generating outputs. We propose to tackle this problem by leveraging richer training signals that can guide our model for preserving input information. In particular, we introduce two types of autoencoding losses, each individually focusing on different aspects (a.k.a. views) of input graphs. The losses are then back-propagated to better calibrate our model via multi-task training. Experiments on two benchmarks for graph-to-text generation show the effectiveness of our approach over a state-of-the-art baseline.", "doc_id": "715a6ea18126aaa785f40c309bab6c50", "publication_year": 2020, "sentences": ["the task of graph - to - text generation aims at producing sentences that preserve the meaning of input graphs .", "as a crucial defect , the current state - of - the - art models may mess up or even drop the core structural information of input graphs when generating outputs .", "we propose to tackle this problem by leveraging richer training signals that can guide our model for preserving input information .", "in particular , we introduce two types of autoencoding losses , each individually focusing on different aspects ( a . k . a . 
views ) of input graphs .", "the losses are then back - propagated to better calibrate our model via multi - task training .", "experiments on two benchmarks for graph - to - text generation show the effectiveness of our approach over a state - of - the - art baseline ."], "events": [{"event_type": "ITT", "arguments": [{"text": "graph - to - text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["graph", "-", "to", "-", "text", "generation"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "producing", "tokens": ["producing"], "offsets": [11]}}, {"event_type": "RWF", "arguments": [{"text": "crucial defect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["crucial", "defect"], "offsets": [23, 24]}], "trigger": {"text": "crucial defect", "tokens": ["crucial", "defect"], "offsets": [23, 24]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [53]}, {"text": "core structural information of input graphs", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["core", "structural", "information", "of", "input", "graphs"], "offsets": [43, 44, 45, 46, 47, 48]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [56]}}, {"event_type": "MDS", "arguments": [{"text": "richer training signals", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["richer", "training", "signals"], "offsets": [61, 62, 63]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [60]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [77]}, {"text": "two types of autoencoding losses", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["two", "types", "of", "autoencoding", "losses"], "offsets": [79, 80, 81, 82, 83]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": 
"losses", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["losses"], "offsets": [105]}, {"text": "better calibrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "calibrate"], "offsets": [112, 113]}], "trigger": {"text": "back - propagated", "tokens": ["back", "-", "propagated"], "offsets": [108, 109, 110]}}, {"event_type": "PUR", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["model"], "offsets": [115]}], "trigger": {"text": "better calibrate", "tokens": ["better", "calibrate"], "offsets": [112, 113]}}, {"event_type": "FIN", "arguments": [{"text": "effectiveness", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["effectiveness"], "offsets": [135]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [133]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "baseline"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["approach"], "offsets": [138]}], "trigger": {"text": "effectiveness", "tokens": ["effectiveness"], "offsets": [135]}}], "document": ["the", "task", "of", "graph", "-", "to", "-", "text", "generation", "aims", "at", "producing", "sentences", "that", "preserve", "the", "meaning", "of", "input", "graphs", ".", "as", "a", "crucial", "defect", ",", "the", "current", "state", "-", "of", "-", "the", "-", "art", "models", "may", "mess", "up", "or", "even", "drop", "the", "core", "structural", "information", "of", "input", "graphs", "when", "generating", "outputs", ".", "we", "propose", "to", "tackle", "this", "problem", "by", "leveraging", "richer", "training", "signals", "that", "can", "guide", "our", "model", "for", "preserving", "input", "information", ".", "in", "particular", ",", "we", "introduce", "two", "types", "of", "autoencoding", 
"losses", ",", "each", "individually", "focusing", "on", "different", "aspects", "(", "a", ".", "k", ".", "a", ".", "views", ")", "of", "input", "graphs", ".", "the", "losses", "are", "then", "back", "-", "propagated", "to", "better", "calibrate", "our", "model", "via", "multi", "-", "task", "training", ".", "experiments", "on", "two", "benchmarks", "for", "graph", "-", "to", "-", "text", "generation", "show", "the", "effectiveness", "of", "our", "approach", "over", "a", "state", "-", "of", "-", "the", "-", "art", "baseline", "."]}, {"venue": "ACL", "title": "Learning to Generate Programs for Table Fact Verification via Structure-Aware Semantic Parsing", "abstract": "Table fact verification aims to check the correctness of textual statements based on given semi-structured data. Most existing methods are devoted to better comprehending logical operations and tables, but they hardly study generating latent programs from statements, with which we can not only retrieve evidences efficiently but also explain reasons behind verifications naturally. However, it is challenging to get correct programs with existing weakly supervised semantic parsers due to the huge search space with lots of spurious programs. In this paper, we address the challenge by leveraging both lexical features and structure features for program generation. Through analyzing the connection between the program tree and the dependency tree, we define a unified concept, operation-oriented tree, to mine structure features, and introduce Structure-Aware Semantic Parsing to integrate structure features into program generation. Moreover, we design a refined objective function with lexical features and violation punishments to further avoid spurious programs. 
Experimental results show that our proposed method generates programs more accurately than existing semantic parsers, and achieves comparable performance to the SOTA on the large-scale benchmark TABFACT.", "doc_id": "3b3299f51b94e84758f5f4eed2915c55", "publication_year": 2022, "sentences": ["table fact verification aims to check the correctness of textual statements based on given semi - structured data .", "most existing methods are devoted to better comprehending logical operations and tables , but they hardly study generating latent programs from statements , with which we can not only retrieve evidences efficiently but also explain reasons behind verifications naturally .", "however , it is challenging to get correct programs with existing weakly supervised semantic parsers due to the huge search space with lots of spurious programs .", "in this paper , we address the challenge by leveraging both lexical features and structure features for program generation .", "through analyzing the connection between the program tree and the dependency tree , we define a unified concept , operation - oriented tree , to mine structure features , and introduce structure - aware semantic parsing to integrate structure features into program generation .", "moreover , we design a refined objective function with lexical features and violation punishments to further avoid spurious programs .", "experimental results show that our proposed method generates programs more accurately than existing semantic parsers , and achieves comparable performance to the sota on the large - scale benchmark tabfact ."], "events": [{"event_type": "ITT", "arguments": [{"text": "table fact verification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["table", "fact", "verification"], "offsets": [0, 1, 2]}], "trigger": {"text": "check", "tokens": ["check"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "existing weakly supervised semantic parsers", "nugget_type": "MOD", 
"argument_type": "Concern", "tokens": ["existing", "weakly", "supervised", "semantic", "parsers"], "offsets": [69, 70, 71, 72, 73]}, {"text": "lots of spurious programs", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lots", "of", "spurious", "programs"], "offsets": [81, 82, 83, 84]}, {"text": "challenging", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["challenging"], "offsets": [63]}, {"text": "get", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["get"], "offsets": [65]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [63]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [90]}, {"text": "program generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["program", "generation"], "offsets": [103, 104]}, {"text": "lexical features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["lexical", "features"], "offsets": [97, 98]}, {"text": "structure features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["structure", "features"], "offsets": [100, 101]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [95]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [119]}, {"text": "operation - oriented tree", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["operation", "-", "oriented", "tree"], "offsets": [125, 126, 127, 128]}, {"text": "mine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mine"], "offsets": [131]}], "trigger": {"text": "define", "tokens": ["define"], "offsets": [120]}}, {"event_type": "MDS", "arguments": [{"text": "structure features", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["structure", "features"], "offsets": [144, 145]}, {"text": "program generation", "nugget_type": "MOD", "argument_type": 
"BaseComponent", "tokens": ["program", "generation"], "offsets": [147, 148]}], "trigger": {"text": "integrate", "tokens": ["integrate"], "offsets": [143]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [152]}, {"text": "refined objective function", "nugget_type": "APP", "argument_type": "Content", "tokens": ["refined", "objective", "function"], "offsets": [155, 156, 157]}, {"text": "further avoid", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["further", "avoid"], "offsets": [165, 166]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [153]}}, {"event_type": "PUR", "arguments": [{"text": "spurious programs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["spurious", "programs"], "offsets": [167, 168]}], "trigger": {"text": "further avoid", "tokens": ["further", "avoid"], "offsets": [165, 166]}}, {"event_type": "CMP", "arguments": [{"text": "existing semantic parsers", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "semantic", "parsers"], "offsets": [182, 183, 184]}, {"text": "programs", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["programs"], "offsets": [178]}, {"text": "more accurately", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "accurately"], "offsets": [179, 180]}, {"text": "refined objective function", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["refined", "objective", "function"], "offsets": [155, 156, 157]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [177]}}, {"event_type": "CMP", "arguments": [{"text": "comparable performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable", "performance"], "offsets": [188, 189]}, {"text": "sota", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["sota"], "offsets": [192]}, {"text": "on the large - scale benchmark tabfact", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["on", "the", "large", "-", "scale", "benchmark", "tabfact"], "offsets": [193, 194, 195, 196, 197, 198, 199]}, {"text": "refined objective function", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["refined", "objective", "function"], "offsets": [155, 156, 157]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [187]}}, {"event_type": "PUR", "arguments": [{"text": "correct programs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["correct", "programs"], "offsets": [66, 67]}], "trigger": {"text": "get", "tokens": ["get"], "offsets": [65]}}, {"event_type": "WKS", "arguments": [{"text": "connection", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["connection"], "offsets": [109]}, {"text": "between the program tree and the dependency tree", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "program", "tree", "and", "the", "dependency", "tree"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "analyzing", "tokens": ["analyzing"], "offsets": [107]}}, {"event_type": "PUR", "arguments": [{"text": "structure features", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["structure", "features"], "offsets": [132, 133]}], "trigger": {"text": "mine", "tokens": ["mine"], "offsets": [131]}}, {"event_type": "FIN", "arguments": [{"text": "generates", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generates"], "offsets": [177]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [172]}}], "document": ["table", "fact", "verification", "aims", "to", "check", "the", "correctness", "of", "textual", "statements", "based", "on", "given", "semi", "-", "structured", "data", ".", "most", "existing", "methods", "are", "devoted", "to", "better", "comprehending", "logical", "operations", "and", "tables", ",", "but", "they", "hardly", "study", "generating", "latent", "programs", "from", "statements", ",", "with", "which", "we", 
"can", "not", "only", "retrieve", "evidences", "efficiently", "but", "also", "explain", "reasons", "behind", "verifications", "naturally", ".", "however", ",", "it", "is", "challenging", "to", "get", "correct", "programs", "with", "existing", "weakly", "supervised", "semantic", "parsers", "due", "to", "the", "huge", "search", "space", "with", "lots", "of", "spurious", "programs", ".", "in", "this", "paper", ",", "we", "address", "the", "challenge", "by", "leveraging", "both", "lexical", "features", "and", "structure", "features", "for", "program", "generation", ".", "through", "analyzing", "the", "connection", "between", "the", "program", "tree", "and", "the", "dependency", "tree", ",", "we", "define", "a", "unified", "concept", ",", "operation", "-", "oriented", "tree", ",", "to", "mine", "structure", "features", ",", "and", "introduce", "structure", "-", "aware", "semantic", "parsing", "to", "integrate", "structure", "features", "into", "program", "generation", ".", "moreover", ",", "we", "design", "a", "refined", "objective", "function", "with", "lexical", "features", "and", "violation", "punishments", "to", "further", "avoid", "spurious", "programs", ".", "experimental", "results", "show", "that", "our", "proposed", "method", "generates", "programs", "more", "accurately", "than", "existing", "semantic", "parsers", ",", "and", "achieves", "comparable", "performance", "to", "the", "sota", "on", "the", "large", "-", "scale", "benchmark", "tabfact", "."]}, {"venue": "ACL", "title": "E3: Entailment-driven Extracting and Editing for Conversational Machine Reading", "abstract": "Conversational machine reading systems help users answer high-level questions (e.g. determine if they qualify for particular government benefits) when they do not know the exact rules by which the determination is made (e.g. whether they need certain income levels or veteran status). The key challenge is that these rules are only provided in the form of a procedural text (e.g. 
guidelines from government website) which the system must read to figure out what to ask the user. We present a new conversational machine reading model that jointly extracts a set of decision rules from the procedural text while reasoning about which are entailed by the conversational history and which still need to be edited to create questions for the user. On the recently introduced ShARC conversational machine reading dataset, our Entailment-driven Extract and Edit network (E3) achieves a new state-of-the-art, outperforming existing systems as well as a new BERT-based baseline. In addition, by explicitly highlighting which information still needs to be gathered, E3 provides a more explainable alternative to prior work. We release source code for our models and experiments at https://github.com/vzhong/e3.", "doc_id": "059a5b16e73549e1a49ede1e422e2d36", "publication_year": 2019, "sentences": ["conversational machine reading systems help users answer high - level questions ( e . g . determine if they qualify for particular government benefits ) when they do not know the exact rules by which the determination is made ( e . g . whether they need certain income levels or veteran status ) .", "the key challenge is that these rules are only provided in the form of a procedural text ( e . g . 
guidelines from government website ) which the system must read to figure out what to ask the user .", "we present a new conversational machine reading model that jointly extracts a set of decision rules from the procedural text while reasoning about which are entailed by the conversational history and which still need to be edited to create questions for the user .", "on the recently introduced sharc conversational machine reading dataset , our entailment - driven extract and edit network ( e3 ) achieves a new state - of - the - art , outperforming existing systems as well as a new bert - based baseline .", "in addition , by explicitly highlighting which information still needs to be gathered , e3 provides a more explainable alternative to prior work .", "we release source code for our models and experiments at https : / / github . com / vzhong / e3 ."], "events": [{"event_type": "ITT", "arguments": [{"text": "conversational machine reading systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["conversational", "machine", "reading", "systems"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "help", "tokens": ["help"], "offsets": [4]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [96]}, {"text": "conversational machine reading model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["conversational", "machine", "reading", "model"], "offsets": [100, 101, 102, 103]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [97]}}, {"event_type": "FAC", "arguments": [{"text": "sharc conversational machine reading dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["sharc", "conversational", "machine", "reading", "dataset"], "offsets": [144, 145, 146, 147, 148]}, {"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [164, 165, 166, 
167, 168, 169, 170]}, {"text": "entailment - driven extract and edit network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["entailment", "-", "driven", "extract", "and", "edit", "network"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [161]}}, {"event_type": "CMP", "arguments": [{"text": "sharc conversational machine reading dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["sharc", "conversational", "machine", "reading", "dataset"], "offsets": [144, 145, 146, 147, 148]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [172]}, {"text": "entailment - driven extract and edit network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["entailment", "-", "driven", "extract", "and", "edit", "network"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [172]}}, {"event_type": "FAC", "arguments": [{"text": "entailment - driven extract and edit network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["entailment", "-", "driven", "extract", "and", "edit", "network"], "offsets": [151, 152, 153, 154, 155, 156, 157]}, {"text": "more explainable alternative", "nugget_type": "APP", "argument_type": "Object", "tokens": ["more", "explainable", "alternative"], "offsets": [202, 203, 204]}, {"text": "by explicitly highlighting which information still needs to be gathered", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "explicitly", "highlighting", "which", "information", "still", "needs", "to", "be", "gathered"], "offsets": [188, 189, 190, 191, 192, 193, 194, 195, 196, 197]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [200]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [209]}, 
{"text": "source code", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["source", "code"], "offsets": [211, 212]}, {"text": "at https : / / github . com / vzhong / e3", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "https", ":", "/", "/", "github", ".", "com", "/", "vzhong", "/", "e3"], "offsets": [218, 219, 220, 221, 222, 223, 224, 225, 226, 227, 228, 229]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [210]}}], "document": ["conversational", "machine", "reading", "systems", "help", "users", "answer", "high", "-", "level", "questions", "(", "e", ".", "g", ".", "determine", "if", "they", "qualify", "for", "particular", "government", "benefits", ")", "when", "they", "do", "not", "know", "the", "exact", "rules", "by", "which", "the", "determination", "is", "made", "(", "e", ".", "g", ".", "whether", "they", "need", "certain", "income", "levels", "or", "veteran", "status", ")", ".", "the", "key", "challenge", "is", "that", "these", "rules", "are", "only", "provided", "in", "the", "form", "of", "a", "procedural", "text", "(", "e", ".", "g", ".", "guidelines", "from", "government", "website", ")", "which", "the", "system", "must", "read", "to", "figure", "out", "what", "to", "ask", "the", "user", ".", "we", "present", "a", "new", "conversational", "machine", "reading", "model", "that", "jointly", "extracts", "a", "set", "of", "decision", "rules", "from", "the", "procedural", "text", "while", "reasoning", "about", "which", "are", "entailed", "by", "the", "conversational", "history", "and", "which", "still", "need", "to", "be", "edited", "to", "create", "questions", "for", "the", "user", ".", "on", "the", "recently", "introduced", "sharc", "conversational", "machine", "reading", "dataset", ",", "our", "entailment", "-", "driven", "extract", "and", "edit", "network", "(", "e3", ")", "achieves", "a", "new", "state", "-", "of", "-", "the", "-", "art", ",", "outperforming", "existing", "systems", "as", "well", "as", 
"a", "new", "bert", "-", "based", "baseline", ".", "in", "addition", ",", "by", "explicitly", "highlighting", "which", "information", "still", "needs", "to", "be", "gathered", ",", "e3", "provides", "a", "more", "explainable", "alternative", "to", "prior", "work", ".", "we", "release", "source", "code", "for", "our", "models", "and", "experiments", "at", "https", ":", "/", "/", "github", ".", "com", "/", "vzhong", "/", "e3", "."]}, {"venue": "ACL", "title": "Text Classification with Negative Supervision", "abstract": "Advanced pre-trained models for text representation have achieved state-of-the-art performance on various text classification tasks. However, the discrepancy between the semantic similarity of texts and labelling standards affects classifiers, i.e. leading to lower performance in cases where classifiers should assign different labels to semantically similar texts. To address this problem, we propose a simple multitask learning model that uses negative supervision. Specifically, our model encourages texts with different labels to have distinct representations. Comprehensive experiments show that our model outperforms the state-of-the-art pre-trained model on both single- and multi-label classifications, sentence and document classifications, and classifications in three different languages.", "doc_id": "6d16c00268634192b6c842e6f147b941", "publication_year": 2020, "sentences": ["advanced pre - trained models for text representation have achieved state - of - the - art performance on various text classification tasks .", "however , the discrepancy between the semantic similarity of texts and labelling standards affects classifiers , i . e . 
leading to lower performance in cases where classifiers should assign different labels to semantically similar texts .", "to address this problem , we propose a simple multitask learning model that uses negative supervision .", "specifically , our model encourages texts with different labels to have distinct representations .", "comprehensive experiments show that our model outperforms the state - of - the - art pre - trained model on both single - and multi - label classifications , sentence and document classifications , and classifications in three different languages ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "simple multitask learning model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "multitask", "learning", "model"], "offsets": [69, 70, 71, 72]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [98]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [94]}}, {"event_type": "CMP", "arguments": [{"text": "simple multitask learning model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["simple", "multitask", "learning", "model"], "offsets": [69, 70, 71, 72]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [98]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [98]}}], "document": ["advanced", "pre", "-", "trained", "models", "for", "text", "representation", "have", "achieved", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "various", "text", "classification", "tasks", ".", "however", ",", "the", "discrepancy", "between", "the", "semantic", "similarity", "of", "texts", "and", "labelling", "standards", "affects", 
"classifiers", ",", "i", ".", "e", ".", "leading", "to", "lower", "performance", "in", "cases", "where", "classifiers", "should", "assign", "different", "labels", "to", "semantically", "similar", "texts", ".", "to", "address", "this", "problem", ",", "we", "propose", "a", "simple", "multitask", "learning", "model", "that", "uses", "negative", "supervision", ".", "specifically", ",", "our", "model", "encourages", "texts", "with", "different", "labels", "to", "have", "distinct", "representations", ".", "comprehensive", "experiments", "show", "that", "our", "model", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "pre", "-", "trained", "model", "on", "both", "single", "-", "and", "multi", "-", "label", "classifications", ",", "sentence", "and", "document", "classifications", ",", "and", "classifications", "in", "three", "different", "languages", "."]}, {"venue": "ACL", "title": "JointCL: A Joint Contrastive Learning Framework for Zero-Shot Stance Detection", "abstract": "Zero-shot stance detection (ZSSD) aims to detect the stance for an unseen target during the inference stage. In this paper, we propose a joint contrastive learning (JointCL) framework, which consists of stance contrastive learning and target-aware prototypical graph contrastive learning. Specifically, a stance contrastive learning strategy is employed to better generalize stance features for unseen targets. Further, we build a prototypical graph for each instance to learn the target-based representation, in which the prototypes are deployed as a bridge to share the graph structures between the known targets and the unseen ones. Then a novel target-aware prototypical graph contrastive learning strategy is devised to generalize the reasoning ability of target-based stance representations to the unseen targets. 
Extensive experiments on three benchmark datasets show that the proposed approach achieves state-of-the-art performance in the ZSSD task.", "doc_id": "a78e771a19b272272890b83888ce9766", "publication_year": 2022, "sentences": ["zero - shot stance detection ( zssd ) aims to detect the stance for an unseen target during the inference stage .", "in this paper , we propose a joint contrastive learning ( jointcl ) framework , which consists of stance contrastive learning and target - aware prototypical graph contrastive learning .", "specifically , a stance contrastive learning strategy is employed to better generalize stance features for unseen targets .", "further , we build a prototypical graph for each instance to learn the target - based representation , in which the prototypes are deployed as a bridge to share the graph structures between the known targets and the unseen ones .", "then a novel target - aware prototypical graph contrastive learning strategy is devised to generalize the reasoning ability of target - based stance representations to the unseen targets .", "extensive experiments on three benchmark datasets show that the proposed approach achieves state - of - the - art performance in the zssd task ."], "events": [{"event_type": "ITT", "arguments": [{"text": "zero - shot stance detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "stance", "detection"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [10]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [26]}, {"text": "joint contrastive learning ( jointcl ) framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["joint", "contrastive", "learning", "framework"], "offsets": [29, 30, 31, 35]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [27]}}, {"event_type": "MDS", "arguments": [{"text": "unseen 
targets", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unseen", "targets"], "offsets": [67, 68]}, {"text": "stance contrastive learning strategy", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["stance", "contrastive", "learning", "strategy"], "offsets": [55, 56, 57, 58]}, {"text": "stance features", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["stance", "features"], "offsets": [64, 65]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [63]}}, {"event_type": "MDS", "arguments": [{"text": "prototypical graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["prototypical", "graph"], "offsets": [75, 76]}, {"text": "each instance", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["each", "instance"], "offsets": [78, 79]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [81]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [73]}}, {"event_type": "MDS", "arguments": [{"text": "prototypes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["prototypes"], "offsets": [91]}, {"text": "bridge", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bridge"], "offsets": [96]}, {"text": "share", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["share"], "offsets": [98]}], "trigger": {"text": "deployed", "tokens": ["deployed"], "offsets": [93]}}, {"event_type": "PRP", "arguments": [{"text": "target - aware prototypical graph contrastive learning strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["target", "-", "aware", "prototypical", "graph", "contrastive", "learning", "strategy"], "offsets": [114, 115, 116, 117, 118, 119, 120, 121]}, {"text": "generalize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generalize"], "offsets": [125]}], "trigger": {"text": "devised", "tokens": ["devised"], "offsets": [123]}}, {"event_type": 
"PUR", "arguments": [{"text": "reasoning ability of target - based stance representations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["reasoning", "ability", "of", "target", "-", "based", "stance", "representations"], "offsets": [127, 128, 129, 130, 131, 132, 133, 134]}, {"text": "to the unseen targets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "unseen", "targets"], "offsets": [135, 136, 137, 138]}], "trigger": {"text": "generalize", "tokens": ["generalize"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "zssd task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "stance", "detection", "task"], "offsets": [0, 1, 2, 3, 4, 163]}, {"text": "three benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "benchmark", "datasets"], "offsets": [143, 144, 145]}, {"text": "target - aware prototypical graph contrastive learning strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["target", "-", "aware", "prototypical", "graph", "contrastive", "learning", "strategy"], "offsets": [114, 115, 116, 117, 118, 119, 120, 121]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [152, 153, 154, 155, 156, 157, 158, 159]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [151]}}, {"event_type": "PUR", "arguments": [{"text": "target - based representation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["target", "-", "based", "representation"], "offsets": [83, 84, 85, 86]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [81]}}, {"event_type": "PUR", "arguments": [{"text": "graph structures", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["graph", "structures"], "offsets": [100, 101]}, {"text": "between the known targets and the unseen ones", "nugget_type": 
"LIM", "argument_type": "Condition", "tokens": ["between", "the", "known", "targets", "and", "the", "unseen", "ones"], "offsets": [102, 103, 104, 105, 106, 107, 108, 109]}], "trigger": {"text": "share", "tokens": ["share"], "offsets": [98]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [151]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [146]}}], "document": ["zero", "-", "shot", "stance", "detection", "(", "zssd", ")", "aims", "to", "detect", "the", "stance", "for", "an", "unseen", "target", "during", "the", "inference", "stage", ".", "in", "this", "paper", ",", "we", "propose", "a", "joint", "contrastive", "learning", "(", "jointcl", ")", "framework", ",", "which", "consists", "of", "stance", "contrastive", "learning", "and", "target", "-", "aware", "prototypical", "graph", "contrastive", "learning", ".", "specifically", ",", "a", "stance", "contrastive", "learning", "strategy", "is", "employed", "to", "better", "generalize", "stance", "features", "for", "unseen", "targets", ".", "further", ",", "we", "build", "a", "prototypical", "graph", "for", "each", "instance", "to", "learn", "the", "target", "-", "based", "representation", ",", "in", "which", "the", "prototypes", "are", "deployed", "as", "a", "bridge", "to", "share", "the", "graph", "structures", "between", "the", "known", "targets", "and", "the", "unseen", "ones", ".", "then", "a", "novel", "target", "-", "aware", "prototypical", "graph", "contrastive", "learning", "strategy", "is", "devised", "to", "generalize", "the", "reasoning", "ability", "of", "target", "-", "based", "stance", "representations", "to", "the", "unseen", "targets", ".", "extensive", "experiments", "on", "three", "benchmark", "datasets", "show", "that", "the", "proposed", "approach", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "the", "zssd", "task", "."]}, {"venue": "ACL", "title": 
"Embedding Time Expressions for Deep Temporal Ordering Models", "abstract": "Data-driven models have demonstrated state-of-the-art performance in inferring the temporal ordering of events in text. However, these models often overlook explicit temporal signals, such as dates and time windows. Rule-based methods can be used to identify the temporal links between these time expressions (timexes), but they fail to capture timexes\u2019 interactions with events and are hard to integrate with the distributed representations of neural net models. In this paper, we introduce a framework to infuse temporal awareness into such models by learning a pre-trained model to embed timexes. We generate synthetic data consisting of pairs of timexes, then train a character LSTM to learn embeddings and classify the timexes\u2019 temporal relation. We evaluate the utility of these embeddings in the context of a strong neural model for event temporal ordering, and show a small increase in performance on the MATRES dataset and more substantial gains on an automatically collected dataset with more frequent event-timex interactions.", "doc_id": "63253e20ffd092a8f66eeaa301d3ceac", "publication_year": 2019, "sentences": ["data - driven models have demonstrated state - of - the - art performance in inferring the temporal ordering of events in text .", "however , these models often overlook explicit temporal signals , such as dates and time windows .", "rule - based methods can be used to identify the temporal links between these time expressions ( timexes ) , but they fail to capture timexes \u2019 interactions with events and are hard to integrate with the distributed representations of neural net models .", "in this paper , we introduce a framework to infuse temporal awareness into such models by learning a pre - trained model to embed timexes .", "we generate synthetic data consisting of pairs of timexes , then train a character lstm to learn embeddings and classify the timexes \u2019 
temporal relation .", "we evaluate the utility of these embeddings in the context of a strong neural model for event temporal ordering , and show a small increase in performance on the matres dataset and more substantial gains on an automatically collected dataset with more frequent event - timex interactions ."], "events": [{"event_type": "ITT", "arguments": [{"text": "data - driven models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["data", "-", "driven", "models"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "data - driven models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["data", "-", "driven", "models"], "offsets": [0, 1, 2, 3]}, {"text": "explicit temporal signals", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["explicit", "temporal", "signals"], "offsets": [30, 31, 32]}], "trigger": {"text": "often overlook", "tokens": ["often", "overlook"], "offsets": [28, 29]}}, {"event_type": "RWS", "arguments": [{"text": "rule - based methods", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["rule", "-", "based", "methods"], "offsets": [41, 42, 43, 44]}, {"text": "temporal links between these time expressions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["temporal", "links", "between", "these", "time", "expressions"], "offsets": [51, 52, 53, 54, 55, 56]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [47]}}, {"event_type": "RWF", "arguments": [{"text": "rule - based methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["rule", "-", "based", "methods"], "offsets": [41, 42, 43, 44]}, {"text": "timexes \u2019 interactions with events", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["timexes", "\u2019", "interactions", "with", "events"], "offsets": [66, 67, 68, 69, 70]}], "trigger": {"text": "fail to capture", "tokens": ["fail", "to", 
"capture"], "offsets": [63, 64, 65]}}, {"event_type": "RWF", "arguments": [{"text": "rule - based methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["rule", "-", "based", "methods"], "offsets": [41, 42, 43, 44]}, {"text": "hard", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hard"], "offsets": [73]}], "trigger": {"text": "hard", "tokens": ["hard"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "distributed representations of neural net models", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["distributed", "representations", "of", "neural", "net", "models"], "offsets": [78, 79, 80, 81, 82, 83]}], "trigger": {"text": "integrate", "tokens": ["integrate"], "offsets": [75]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [89]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework"], "offsets": [92]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "pre - trained model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "model"], "offsets": [103, 104, 105, 106]}, {"text": "time expressions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["time", "expressions"], "offsets": [55, 56]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [101]}}, {"event_type": "MDS", "arguments": [{"text": "temporal awareness", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["temporal", "awareness"], "offsets": [95, 96]}, {"text": "neural net models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["neural", "net", "models"], "offsets": [81, 82, 83]}], "trigger": {"text": "infuse", "tokens": ["infuse"], "offsets": [94]}}, {"event_type": "MDS", "arguments": [{"text": "synthetic data consisting of pairs of timexes", 
"nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["synthetic", "data", "consisting", "of", "pairs", "of", "time", "expressions"], "offsets": [113, 114, 115, 116, 117, 118, 55, 56]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [112]}}, {"event_type": "MDS", "arguments": [{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [127]}, {"text": "classify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["classify"], "offsets": [130]}, {"text": "character lstm", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["character", "lstm"], "offsets": [124, 125]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [122]}}, {"event_type": "PUR", "arguments": [{"text": "embeddings", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["embeddings"], "offsets": [128]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "timexes \u2019 temporal relation", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["time", "expressions", "\u2019", "temporal", "relation"], "offsets": [55, 56, 133, 134, 135]}], "trigger": {"text": "classify", "tokens": ["classify"], "offsets": [130]}}, {"event_type": "WKS", "arguments": [{"text": "utility of these embeddings", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["utility", "of", "embeddings"], "offsets": [140, 141, 128]}, {"text": "in the context of a strong neural model for event temporal ordering", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "context", "of", "a", "strong", "neural", "model", "for", "event", "temporal", "ordering"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [138]}}, {"event_type": "FAC", "arguments": [{"text": "matres dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": 
["matres", "dataset"], "offsets": [166, 167]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["performance"], "offsets": [163]}, {"text": "small increase", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["small", "increase"], "offsets": [160, 161]}, {"text": "automatically collected dataset with more frequent event - timex interactions", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["automatically", "collected", "dataset", "with", "more", "frequent", "event", "-", "timex", "interactions"], "offsets": [174, 175, 176, 177, 178, 179, 180, 181, 182, 183]}, {"text": "more substantial gains", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["more", "substantial", "gains"], "offsets": [169, 170, 171]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [158]}}], "document": ["data", "-", "driven", "models", "have", "demonstrated", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "inferring", "the", "temporal", "ordering", "of", "events", "in", "text", ".", "however", ",", "these", "models", "often", "overlook", "explicit", "temporal", "signals", ",", "such", "as", "dates", "and", "time", "windows", ".", "rule", "-", "based", "methods", "can", "be", "used", "to", "identify", "the", "temporal", "links", "between", "these", "time", "expressions", "(", "timexes", ")", ",", "but", "they", "fail", "to", "capture", "timexes", "\u2019", "interactions", "with", "events", "and", "are", "hard", "to", "integrate", "with", "the", "distributed", "representations", "of", "neural", "net", "models", ".", "in", "this", "paper", ",", "we", "introduce", "a", "framework", "to", "infuse", "temporal", "awareness", "into", "such", "models", "by", "learning", "a", "pre", "-", "trained", "model", "to", "embed", "timexes", ".", "we", "generate", "synthetic", "data", "consisting", "of", "pairs", "of", "timexes", ",", "then", "train", "a", "character", "lstm", "to", "learn", "embeddings", "and", 
"classify", "the", "timexes", "\u2019", "temporal", "relation", ".", "we", "evaluate", "the", "utility", "of", "these", "embeddings", "in", "the", "context", "of", "a", "strong", "neural", "model", "for", "event", "temporal", "ordering", ",", "and", "show", "a", "small", "increase", "in", "performance", "on", "the", "matres", "dataset", "and", "more", "substantial", "gains", "on", "an", "automatically", "collected", "dataset", "with", "more", "frequent", "event", "-", "timex", "interactions", "."]}, {"venue": "ACL", "title": "Dialogue Natural Language Inference", "abstract": "Consistency is a long standing issue faced by dialogue models. In this paper, we frame the consistency of dialogue agents as natural language inference (NLI) and create a new natural language inference dataset called Dialogue NLI. We propose a method which demonstrates that a model trained on Dialogue NLI can be used to improve the consistency of a dialogue model, and evaluate the method with human evaluation and with automatic metrics on a suite of evaluation sets designed to measure a dialogue model\u2019s consistency.", "doc_id": "95e8359459e375790ae48f49082654cd", "publication_year": 2019, "sentences": ["consistency is a long standing issue faced by dialogue models .", "in this paper , we frame the consistency of dialogue agents as natural language inference ( nli ) and create a new natural language inference dataset called dialogue nli .", "we propose a method which demonstrates that a model trained on dialogue nli can be used to improve the consistency of a dialogue model , and evaluate the method with human evaluation and with automatic metrics on a suite of evaluation sets designed to measure a dialogue model \u2019 s consistency ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [15]}, {"text": "consistency of dialogue agents", "nugget_type": "TAK", "argument_type": "Content", "tokens": 
["consistency", "of", "dialogue", "agents"], "offsets": [18, 19, 20, 21]}, {"text": "natural language inference", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "inference"], "offsets": [23, 24, 25]}], "trigger": {"text": "frame", "tokens": ["frame"], "offsets": [16]}}, {"event_type": "PRP", "arguments": [{"text": "natural language inference dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["natural", "language", "inference", "dataset"], "offsets": [33, 34, 35, 36]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [15]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [30]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [41]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [44]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "a model trained on dialogue nli", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["a", "model", "trained", "on", "dialogue", "natural", "language", "inference"], "offsets": [48, 49, 50, 51, 52, 23, 24, 25]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [58]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [56]}}, {"event_type": "PUR", "arguments": [{"text": "consistency of a dialogue model", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["consistency", "of", "a", "dialogue", "model"], "offsets": [60, 61, 62, 63, 64]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "measure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["measure"], "offsets": [85]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["method"], "offsets": [69]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "dialogue model \u2019 s consistency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["dialogue", "model", "\u2019", "s", "consistency"], "offsets": [87, 88, 89, 90, 91]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [85]}}], "document": ["consistency", "is", "a", "long", "standing", "issue", "faced", "by", "dialogue", "models", ".", "in", "this", "paper", ",", "we", "frame", "the", "consistency", "of", "dialogue", "agents", "as", "natural", "language", "inference", "(", "nli", ")", "and", "create", "a", "new", "natural", "language", "inference", "dataset", "called", "dialogue", "nli", ".", "we", "propose", "a", "method", "which", "demonstrates", "that", "a", "model", "trained", "on", "dialogue", "nli", "can", "be", "used", "to", "improve", "the", "consistency", "of", "a", "dialogue", "model", ",", "and", "evaluate", "the", "method", "with", "human", "evaluation", "and", "with", "automatic", "metrics", "on", "a", "suite", "of", "evaluation", "sets", "designed", "to", "measure", "a", "dialogue", "model", "\u2019", "s", "consistency", "."]}, {"venue": "ACL", "title": "Improving Machine Reading Comprehension with Contextualized Commonsense Knowledge", "abstract": "To perform well on a machine reading comprehension (MRC) task, machine readers usually require commonsense knowledge that is not explicitly mentioned in the given documents. This paper aims to extract a new kind of structured knowledge from scripts and use it to improve MRC. We focus on scripts as they contain rich verbal and nonverbal messages, and two relevant messages originally conveyed by different modalities during a short time period may serve as arguments of a piece of commonsense knowledge as they function together in daily communications. 
To save human efforts to name relations, we propose to represent relations implicitly by situating such an argument pair in a context and call it contextualized knowledge. To use the extracted knowledge to improve MRC, we compare several fine-tuning strategies to use the weakly-labeled MRC data constructed based on contextualized knowledge and further design a teacher-student paradigm with multiple teachers to facilitate the transfer of knowledge in weakly-labeled MRC data. Experimental results show that our paradigm outperforms other methods that use weakly-labeled data and improves a state-of-the-art baseline by 4.3% in accuracy on a Chinese multiple-choice MRC dataset C3, wherein most of the questions require unstated prior knowledge. We also seek to transfer the knowledge to other tasks by simply adapting the resulting student reader, yielding a 2.9% improvement in F1 on a relation extraction dataset DialogRE, demonstrating the potential usefulness of the knowledge for non-MRC tasks that require document comprehension.", "doc_id": "a7a2e1b2a029667119fdb66ee6a9315c", "publication_year": 2022, "sentences": ["to perform well on a machine reading comprehension ( mrc ) task , machine readers usually require commonsense knowledge that is not explicitly mentioned in the given documents .", "this paper aims to extract a new kind of structured knowledge from scripts and use it to improve mrc .", "we focus on scripts as they contain rich verbal and nonverbal messages , and two relevant messages originally conveyed by different modalities during a short time period may serve as arguments of a piece of commonsense knowledge as they function together in daily communications .", "to save human efforts to name relations , we propose to represent relations implicitly by situating such an argument pair in a context and call it contextualized knowledge .", "to use the extracted knowledge to improve mrc , we compare several fine - tuning strategies to use the weakly - labeled mrc 
data constructed based on contextualized knowledge and further design a teacher - student paradigm with multiple teachers to facilitate the transfer of knowledge in weakly - labeled mrc data .", "experimental results show that our paradigm outperforms other methods that use weakly - labeled data and improves a state - of - the - art baseline by 4 . 3 % in accuracy on a chinese multiple - choice mrc dataset c3 , wherein most of the questions require unstated prior knowledge .", "we also seek to transfer the knowledge to other tasks by simply adapting the resulting student reader , yielding a 2 . 9 % improvement in f1 on a relation extraction dataset dialogre , demonstrating the potential usefulness of the knowledge for non - mrc tasks that require document comprehension ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine reading comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "reading", "comprehension"], "offsets": [5, 6, 7]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "from scripts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "scripts"], "offsets": [40, 41]}, {"text": "structured knowledge", "nugget_type": "APP", "argument_type": "Content", "tokens": ["structured", "knowledge"], "offsets": [38, 39]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "structured knowledge", "nugget_type": "APP", "argument_type": "Content", "tokens": ["structured", "knowledge"], "offsets": [38, 39]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [46]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [43]}}, {"event_type": "PUR", "arguments": [{"text": "mrc", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["machine", "reading", "comprehension"], "offsets": [5, 6, 7]}], "trigger": 
{"text": "improve", "tokens": ["improve"], "offsets": [46]}}, {"event_type": "MDS", "arguments": [{"text": "contextualized knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["contextualized", "knowledge"], "offsets": [120, 121]}, {"text": "context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["context"], "offsets": [116]}, {"text": "represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represent"], "offsets": [105]}], "trigger": {"text": "situating", "tokens": ["situating"], "offsets": [109]}}, {"event_type": "PUR", "arguments": [{"text": "relations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["relations"], "offsets": [106]}, {"text": "implicitly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["implicitly"], "offsets": [107]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [132]}, {"text": "several fine - tuning strategies", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "fine", "-", "tuning", "strategies"], "offsets": [134, 135, 136, 137, 138]}, {"text": "use", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["use"], "offsets": [124]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [133]}}, {"event_type": "PUR", "arguments": [{"text": "extracted knowledge", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["extracted", "knowledge"], "offsets": [126, 127]}, {"text": "to improve mrc", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "improve", "machine", "reading", "comprehension"], "offsets": [128, 129, 5, 6, 7]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "contextualized knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": 
["contextualized", "knowledge"], "offsets": [150, 151]}, {"text": "weakly - labeled mrc data", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["weakly", "-", "labeled", "machine", "reading", "comprehension", "data"], "offsets": [142, 143, 144, 5, 6, 7, 146]}], "trigger": {"text": "constructed", "tokens": ["constructed"], "offsets": [147]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [132]}, {"text": "teacher - student paradigm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["teacher", "-", "student", "paradigm"], "offsets": [156, 157, 158, 159]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [164]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [154]}}, {"event_type": "PUR", "arguments": [{"text": "transfer of knowledge", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["transfer", "of", "knowledge"], "offsets": [166, 167, 168]}, {"text": "weakly - labeled mrc data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["weakly", "-", "labeled", "machine", "reading", "comprehension", "data"], "offsets": [170, 171, 172, 5, 6, 7, 174]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [164]}}, {"event_type": "CMP", "arguments": [{"text": "paradigm", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["paradigm"], "offsets": [181]}, {"text": "other methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "methods"], "offsets": [183, 184]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [182]}}, {"event_type": "FAC", "arguments": [{"text": "paradigm", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["paradigm"], "offsets": [181]}, {"text": "state - of - the - art baseline", "nugget_type": "APP", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", 
"-", "art", "baseline"], "offsets": [194, 195, 196, 197, 198, 199, 200, 201]}, {"text": "4 . 3 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["4", ".", "3", "%"], "offsets": [203, 204, 205, 206]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["accuracy"], "offsets": [208]}, {"text": "chinese multiple - choice mrc dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["chinese", "multiple", "-", "choice", "machine", "reading", "comprehension", "dataset"], "offsets": [211, 212, 213, 214, 5, 6, 7, 216]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [192]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [229]}, {"text": "resulting student reader", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["resulting", "student", "reader"], "offsets": [243, 244, 245]}, {"text": "transfer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["transfer"], "offsets": [233]}], "trigger": {"text": "adapting", "tokens": ["adapting"], "offsets": [241]}}, {"event_type": "PUR", "arguments": [{"text": "knowledge to other tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["knowledge", "to", "other", "tasks"], "offsets": [235, 236, 237, 238]}], "trigger": {"text": "transfer", "tokens": ["transfer"], "offsets": [233]}}, {"event_type": "FAC", "arguments": [{"text": "2 . 
9 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", ".", "9", "%"], "offsets": [249, 250, 251, 252]}, {"text": "improvement in f1", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["improvement", "in", "f1"], "offsets": [253, 254, 255]}, {"text": "relation extraction dataset dialogre", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["relation", "extraction", "dataset", "dialogre"], "offsets": [258, 259, 260, 261]}], "trigger": {"text": "yielding", "tokens": ["yielding"], "offsets": [247]}}, {"event_type": "FAC", "arguments": [{"text": "potential usefulness of the knowledge", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["potential", "usefulness", "of", "the", "knowledge"], "offsets": [265, 266, 267, 268, 269]}, {"text": "non - mrc tasks that require document comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["non", "-", "machine", "reading", "comprehension", "tasks", "that", "require", "document", "comprehension"], "offsets": [271, 272, 5, 6, 7, 274, 275, 276, 277, 278]}], "trigger": {"text": "demonstrating", "tokens": ["demonstrating"], "offsets": [263]}}], "document": ["to", "perform", "well", "on", "a", "machine", "reading", "comprehension", "(", "mrc", ")", "task", ",", "machine", "readers", "usually", "require", "commonsense", "knowledge", "that", "is", "not", "explicitly", "mentioned", "in", "the", "given", "documents", ".", "this", "paper", "aims", "to", "extract", "a", "new", "kind", "of", "structured", "knowledge", "from", "scripts", "and", "use", "it", "to", "improve", "mrc", ".", "we", "focus", "on", "scripts", "as", "they", "contain", "rich", "verbal", "and", "nonverbal", "messages", ",", "and", "two", "relevant", "messages", "originally", "conveyed", "by", "different", "modalities", "during", "a", "short", "time", "period", "may", "serve", "as", "arguments", "of", "a", "piece", "of", "commonsense", "knowledge", "as", "they", "function", "together", "in", "daily", 
"communications", ".", "to", "save", "human", "efforts", "to", "name", "relations", ",", "we", "propose", "to", "represent", "relations", "implicitly", "by", "situating", "such", "an", "argument", "pair", "in", "a", "context", "and", "call", "it", "contextualized", "knowledge", ".", "to", "use", "the", "extracted", "knowledge", "to", "improve", "mrc", ",", "we", "compare", "several", "fine", "-", "tuning", "strategies", "to", "use", "the", "weakly", "-", "labeled", "mrc", "data", "constructed", "based", "on", "contextualized", "knowledge", "and", "further", "design", "a", "teacher", "-", "student", "paradigm", "with", "multiple", "teachers", "to", "facilitate", "the", "transfer", "of", "knowledge", "in", "weakly", "-", "labeled", "mrc", "data", ".", "experimental", "results", "show", "that", "our", "paradigm", "outperforms", "other", "methods", "that", "use", "weakly", "-", "labeled", "data", "and", "improves", "a", "state", "-", "of", "-", "the", "-", "art", "baseline", "by", "4", ".", "3", "%", "in", "accuracy", "on", "a", "chinese", "multiple", "-", "choice", "mrc", "dataset", "c3", ",", "wherein", "most", "of", "the", "questions", "require", "unstated", "prior", "knowledge", ".", "we", "also", "seek", "to", "transfer", "the", "knowledge", "to", "other", "tasks", "by", "simply", "adapting", "the", "resulting", "student", "reader", ",", "yielding", "a", "2", ".", "9", "%", "improvement", "in", "f1", "on", "a", "relation", "extraction", "dataset", "dialogre", ",", "demonstrating", "the", "potential", "usefulness", "of", "the", "knowledge", "for", "non", "-", "mrc", "tasks", "that", "require", "document", "comprehension", "."]}, {"venue": "ACL", "title": "Parameter-Efficient Transfer Learning with Diff Pruning", "abstract": "The large size of pretrained networks makes them difficult to deploy for multiple tasks in storage-constrained settings. Diff pruning enables parameter-efficient transfer learning that scales well with new tasks. 
The approach learns a task-specific \u201cdiff\u201d vector that extends the original pretrained parameters. This diff vector is adaptively pruned during training with a differentiable approximation to the L0-norm penalty to encourage sparsity. As the number of tasks increases, diff pruning remains parameter-efficient, as it requires storing only a small diff vector for each task. Since it does not require access to all tasks during training, it is attractive in on-device deployment settings where tasks arrive in stream or even from different providers. Diff pruning can match the performance of finetuned baselines on the GLUE benchmark while only modifying 0.5% of the pretrained model\u2019s parameters per task and scales favorably in comparison to popular pruning approaches.", "doc_id": "775fcbff3f2ceb23fbc28111b0c62a56", "publication_year": 2021, "sentences": ["the large size of pretrained networks makes them difficult to deploy for multiple tasks in storage - constrained settings .", "diff pruning enables parameter - efficient transfer learning that scales well with new tasks .", "the approach learns a task - specific \u201c diff \u201d vector that extends the original pretrained parameters .", "this diff vector is adaptively pruned during training with a differentiable approximation to the l0 - norm penalty to encourage sparsity .", "as the number of tasks increases , diff pruning remains parameter - efficient , as it requires storing only a small diff vector for each task .", "since it does not require access to all tasks during training , it is attractive in on - device deployment settings where tasks arrive in stream or even from different providers .", "diff pruning can match the performance of finetuned baselines on the glue benchmark while only modifying 0 . 
5 % of the pretrained model \u2019 s parameters per task and scales favorably in comparison to popular pruning approaches ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pretrained networks", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pretrained", "networks"], "offsets": [4, 5]}], "trigger": {"text": "difficult", "tokens": ["difficult"], "offsets": [8]}}, {"event_type": "PRP", "arguments": [{"text": "diff pruning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["diff", "pruning"], "offsets": [20, 21]}], "trigger": {"text": "enables", "tokens": ["enables"], "offsets": [22]}}, {"event_type": "CMP", "arguments": [{"text": "diff pruning", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["diff", "pruning"], "offsets": [134, 135]}, {"text": "popular pruning approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["popular", "pruning", "approaches"], "offsets": [169, 170, 171]}], "trigger": {"text": "scales favorably", "tokens": ["scales", "favorably"], "offsets": [164, 165]}}, {"event_type": "FAC", "arguments": [{"text": "diff pruning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["diff", "pruning"], "offsets": [134, 135]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [139]}, {"text": "finetuned baselines", "nugget_type": "APP", "argument_type": "Object", "tokens": ["finetuned", "baselines"], "offsets": [141, 142]}], "trigger": {"text": "match", "tokens": ["match"], "offsets": [137]}}], "document": ["the", "large", "size", "of", "pretrained", "networks", "makes", "them", "difficult", "to", "deploy", "for", "multiple", "tasks", "in", "storage", "-", "constrained", "settings", ".", "diff", "pruning", "enables", "parameter", "-", "efficient", "transfer", "learning", "that", "scales", "well", "with", "new", "tasks", ".", "the", "approach", "learns", "a", "task", "-", "specific", "\u201c", "diff", "\u201d", "vector", "that", 
"extends", "the", "original", "pretrained", "parameters", ".", "this", "diff", "vector", "is", "adaptively", "pruned", "during", "training", "with", "a", "differentiable", "approximation", "to", "the", "l0", "-", "norm", "penalty", "to", "encourage", "sparsity", ".", "as", "the", "number", "of", "tasks", "increases", ",", "diff", "pruning", "remains", "parameter", "-", "efficient", ",", "as", "it", "requires", "storing", "only", "a", "small", "diff", "vector", "for", "each", "task", ".", "since", "it", "does", "not", "require", "access", "to", "all", "tasks", "during", "training", ",", "it", "is", "attractive", "in", "on", "-", "device", "deployment", "settings", "where", "tasks", "arrive", "in", "stream", "or", "even", "from", "different", "providers", ".", "diff", "pruning", "can", "match", "the", "performance", "of", "finetuned", "baselines", "on", "the", "glue", "benchmark", "while", "only", "modifying", "0", ".", "5", "%", "of", "the", "pretrained", "model", "\u2019", "s", "parameters", "per", "task", "and", "scales", "favorably", "in", "comparison", "to", "popular", "pruning", "approaches", "."]}, {"venue": "ACL", "title": "Employing Argumentation Knowledge Graphs for Neural Argument Generation", "abstract": "Generating high-quality arguments, while being challenging, may benefit a wide range of downstream applications, such as writing assistants and argument search engines. Motivated by the effectiveness of utilizing knowledge graphs for supporting general text generation tasks, this paper investigates the usage of argumentation-related knowledge graphs to control the generation of arguments. In particular, we construct and populate three knowledge graphs, employing several compositions of them to encode various knowledge into texts of debate portals and relevant paragraphs from Wikipedia. Then, the texts with the encoded knowledge are used to fine-tune a pre-trained text generation model, GPT-2. 
We evaluate the newly created arguments manually and automatically, based on several dimensions important in argumentative contexts, including argumentativeness and plausibility. The results demonstrate the positive impact of encoding the graphs\u2019 knowledge into debate portal texts for generating arguments with superior quality than those generated without knowledge.", "doc_id": "620848e285ca5b97795866caef7d551f", "publication_year": 2021, "sentences": ["generating high - quality arguments , while being challenging , may benefit a wide range of downstream applications , such as writing assistants and argument search engines .", "motivated by the effectiveness of utilizing knowledge graphs for supporting general text generation tasks , this paper investigates the usage of argumentation - related knowledge graphs to control the generation of arguments .", "in particular , we construct and populate three knowledge graphs , employing several compositions of them to encode various knowledge into texts of debate portals and relevant paragraphs from wikipedia .", "then , the texts with the encoded knowledge are used to fine - tune a pre - trained text generation model , gpt - 2 .", "we evaluate the newly created arguments manually and automatically , based on several dimensions important in argumentative contexts , including argumentativeness and plausibility .", "the results demonstrate the positive impact of encoding the graphs \u2019 knowledge into debate portal texts for generating arguments with superior quality than those generated without knowledge ."], "events": [{"event_type": "ITT", "arguments": [{"text": "generating high - quality arguments", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["generating", "high", "-", "quality", "arguments"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [8]}}, {"event_type": "WKS", "arguments": [{"text": "control", "nugget_type": "E-PUR", "argument_type": 
"Target", "tokens": ["control"], "offsets": [55]}, {"text": "usage of argumentation - related knowledge graphs", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["usage", "of", "argumentation", "-", "related", "knowledge", "graphs"], "offsets": [47, 48, 49, 50, 51, 52, 53]}], "trigger": {"text": "investigates", "tokens": ["investigates"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "generation of arguments", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["generation", "of", "arguments"], "offsets": [57, 58, 59]}], "trigger": {"text": "control", "tokens": ["control"], "offsets": [55]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [64]}, {"text": "three knowledge graphs", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["three", "knowledge", "graphs"], "offsets": [68, 69, 70]}], "trigger": {"text": "construct and populate", "tokens": ["construct", "and", "populate"], "offsets": [65, 66, 67]}}, {"event_type": "MDS", "arguments": [{"text": "texts of debate portals", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["texts", "of", "debate", "portals"], "offsets": [82, 83, 84, 85]}, {"text": "relevant paragraphs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relevant", "paragraphs"], "offsets": [87, 88]}, {"text": "from wikipedia", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "wikipedia"], "offsets": [89, 90]}, {"text": "various knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["various", "knowledge"], "offsets": [79, 80]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "pre - trained text generation model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "trained", "text", "generation", "model"], "offsets": [107, 108, 109, 110, 111, 112]}, 
{"text": "texts", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["texts"], "offsets": [95]}, {"text": "with the encoded knowledge", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "encoded", "knowledge"], "offsets": [96, 97, 98, 99]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [103, 104, 105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [118]}, {"text": "newly created arguments", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["newly", "created", "arguments"], "offsets": [121, 122, 123]}, {"text": "manually and automatically", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["manually", "and", "automatically"], "offsets": [124, 125, 126]}, {"text": "based on several dimensions important in argumentative contexts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "several", "dimensions", "important", "in", "argumentative", "contexts"], "offsets": [128, 129, 130, 131, 132, 133, 134, 135]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "arguments", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["arguments"], "offsets": [160]}, {"text": "with superior quality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "superior", "quality"], "offsets": [161, 162, 163]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [159]}}, {"event_type": "FAC", "arguments": [{"text": "positive impact of encoding the graphs \u2019 knowledge into debate portal texts", "nugget_type": "STR", "argument_type": "Object", "tokens": ["positive", "impact", "of", "encoding", "the", "graphs", "\u2019", "knowledge", "into", "debate", "portal", "texts"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157]}, {"text": 
"generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [159]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [144]}}], "document": ["generating", "high", "-", "quality", "arguments", ",", "while", "being", "challenging", ",", "may", "benefit", "a", "wide", "range", "of", "downstream", "applications", ",", "such", "as", "writing", "assistants", "and", "argument", "search", "engines", ".", "motivated", "by", "the", "effectiveness", "of", "utilizing", "knowledge", "graphs", "for", "supporting", "general", "text", "generation", "tasks", ",", "this", "paper", "investigates", "the", "usage", "of", "argumentation", "-", "related", "knowledge", "graphs", "to", "control", "the", "generation", "of", "arguments", ".", "in", "particular", ",", "we", "construct", "and", "populate", "three", "knowledge", "graphs", ",", "employing", "several", "compositions", "of", "them", "to", "encode", "various", "knowledge", "into", "texts", "of", "debate", "portals", "and", "relevant", "paragraphs", "from", "wikipedia", ".", "then", ",", "the", "texts", "with", "the", "encoded", "knowledge", "are", "used", "to", "fine", "-", "tune", "a", "pre", "-", "trained", "text", "generation", "model", ",", "gpt", "-", "2", ".", "we", "evaluate", "the", "newly", "created", "arguments", "manually", "and", "automatically", ",", "based", "on", "several", "dimensions", "important", "in", "argumentative", "contexts", ",", "including", "argumentativeness", "and", "plausibility", ".", "the", "results", "demonstrate", "the", "positive", "impact", "of", "encoding", "the", "graphs", "\u2019", "knowledge", "into", "debate", "portal", "texts", "for", "generating", "arguments", "with", "superior", "quality", "than", "those", "generated", "without", "knowledge", "."]}, {"venue": "ACL", "title": "Analyzing Wrap-Up Effects through an Information-Theoretic Lens", "abstract": "Numerous analyses of reading time (RT) data have been undertaken in the 
effort to learn more about the internal processes that occur during reading comprehension. However, data measured on words at the end of a sentence\u2013or even clause\u2013is often omitted due to the confounding factors introduced by so-called \u201cwrap-up effects,\u201d which manifests as a skewed distribution of RTs for these words. Consequently, the understanding of the cognitive processes that might be involved in these effects is limited. In this work, we attempt to learn more about these processes by looking for the existence\u2013or absence\u2013of a link between wrap-up effects and information theoretic quantities, such as word and context information content. We find that the information distribution of prior context is often predictive of sentence- and clause-final RTs (while not of sentence-medial RTs), which lends support to several prior hypotheses about the processes involved in wrap-up effects.", "doc_id": "c230114c489135157e9484b5ab5fa59f", "publication_year": 2022, "sentences": ["numerous analyses of reading time ( rt ) data have been undertaken in the effort to learn more about the internal processes that occur during reading comprehension .", "however , data measured on words at the end of a sentence \u2013 or even clause \u2013 is often omitted due to the confounding factors introduced by so - called \u201c wrap - up effects , \u201d which manifests as a skewed distribution of rts for these words .", "consequently , the understanding of the cognitive processes that might be involved in these effects is limited .", "in this work , we attempt to learn more about these processes by looking for the existence \u2013 or absence \u2013 of a link between wrap - up effects and information theoretic quantities , such as word and context information content .", "we find that the information distribution of prior context is often predictive of sentence - and clause - final rts ( while not of sentence - medial rts ) , which lends support to several prior 
hypotheses about the processes involved in wrap - up effects ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reading comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reading", "comprehension"], "offsets": [25, 26]}], "trigger": {"text": "occur", "tokens": ["occur"], "offsets": [23]}}, {"event_type": "RWF", "arguments": [{"text": "these words", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["these", "words"], "offsets": [74, 75]}, {"text": "skewed distribution", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["skewed", "distribution"], "offsets": [69, 70]}, {"text": "rts", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reading", "time"], "offsets": [3, 4]}], "trigger": {"text": "manifests", "tokens": ["manifests"], "offsets": [66]}}, {"event_type": "RWF", "arguments": [{"text": "confounding factors", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["confounding", "factors"], "offsets": [51, 52]}, {"text": "data measured on words at the end of a sentence", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["data", "measured", "on", "words", "at", "the", "end", "of", "a", "sentence"], "offsets": [30, 31, 32, 33, 34, 35, 36, 37, 38, 39]}], "trigger": {"text": "omitted", "tokens": ["omitted"], "offsets": [47]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [99]}, {"text": "existence \u2013 or absence \u2013 of a link", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["existence", "\u2013", "or", "absence", "\u2013", "of", "a", "link"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118]}, {"text": "between wrap - up effects and information theoretic quantities", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "wrap", "-", "up", "effects", "and", "information", "theoretic", "quantities"], "offsets": [119, 120, 121, 122, 123, 124, 125, 126, 127]}, 
{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [102]}], "trigger": {"text": "looking for", "tokens": ["looking", "for"], "offsets": [108, 109]}}, {"event_type": "PUR", "arguments": [{"text": "more about these processes", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["more", "about", "these", "processes"], "offsets": [103, 104, 105, 106]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [102]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [137]}, {"text": "predictive", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["predictive"], "offsets": [148]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [138]}}, {"event_type": "FAC", "arguments": [{"text": "information distribution of prior context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["information", "distribution", "of", "prior", "context"], "offsets": [141, 142, 143, 144, 145]}, {"text": "sentence - final rts", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["sentence", "-", "final", "reading", "time"], "offsets": [150, 151, 155, 3, 4]}, {"text": "clause - final rts", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["clause", "-", "final", "reading", "time"], "offsets": [153, 154, 155, 3, 4]}], "trigger": {"text": "predictive", "tokens": ["predictive"], "offsets": [148]}}, {"event_type": "FAC", "arguments": [{"text": "several prior hypotheses", "nugget_type": "APP", "argument_type": "Object", "tokens": ["several", "prior", "hypotheses"], "offsets": [171, 172, 173]}, {"text": "information distribution of prior context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["information", "distribution", "of", "prior", "context"], "offsets": [141, 142, 143, 144, 145]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [169]}}], "document": ["numerous", "analyses", 
"of", "reading", "time", "(", "rt", ")", "data", "have", "been", "undertaken", "in", "the", "effort", "to", "learn", "more", "about", "the", "internal", "processes", "that", "occur", "during", "reading", "comprehension", ".", "however", ",", "data", "measured", "on", "words", "at", "the", "end", "of", "a", "sentence", "\u2013", "or", "even", "clause", "\u2013", "is", "often", "omitted", "due", "to", "the", "confounding", "factors", "introduced", "by", "so", "-", "called", "\u201c", "wrap", "-", "up", "effects", ",", "\u201d", "which", "manifests", "as", "a", "skewed", "distribution", "of", "rts", "for", "these", "words", ".", "consequently", ",", "the", "understanding", "of", "the", "cognitive", "processes", "that", "might", "be", "involved", "in", "these", "effects", "is", "limited", ".", "in", "this", "work", ",", "we", "attempt", "to", "learn", "more", "about", "these", "processes", "by", "looking", "for", "the", "existence", "\u2013", "or", "absence", "\u2013", "of", "a", "link", "between", "wrap", "-", "up", "effects", "and", "information", "theoretic", "quantities", ",", "such", "as", "word", "and", "context", "information", "content", ".", "we", "find", "that", "the", "information", "distribution", "of", "prior", "context", "is", "often", "predictive", "of", "sentence", "-", "and", "clause", "-", "final", "rts", "(", "while", "not", "of", "sentence", "-", "medial", "rts", ")", ",", "which", "lends", "support", "to", "several", "prior", "hypotheses", "about", "the", "processes", "involved", "in", "wrap", "-", "up", "effects", "."]}, {"venue": "ACL", "title": "Interpretable and Low-Resource Entity Matching via Decoupling Feature Learning from Decision Making", "abstract": "Entity Matching (EM) aims at recognizing entity records that denote the same real-world object. Neural EM models learn vector representation of entity descriptions and match entities end-to-end. 
Though robust, these methods require many annotated resources for training, and lack of interpretability. In this paper, we propose a novel EM framework that consists of Heterogeneous Information Fusion (HIF) and Key Attribute Tree (KAT) Induction to decouple feature representation from matching decision. Using self-supervised learning and mask mechanism in pre-trained language modeling, HIF learns the embeddings of noisy attribute values by inter-attribute attention with unlabeled data. Using a set of comparison features and a limited amount of annotated data, KAT Induction learns an efficient decision tree that can be interpreted by generating entity matching rules whose structure is advocated by domain experts. Experiments on 6 public datasets and 3 industrial datasets show that our method is highly efficient and outperforms SOTA EM models in most cases. We will release the codes upon acceptance.", "doc_id": "4101567e68dc02c56e833450efdb34ca", "publication_year": 2021, "sentences": ["entity matching ( em ) aims at recognizing entity records that denote the same real - world object .", "neural em models learn vector representation of entity descriptions and match entities end - to - end .", "though robust , these methods require many annotated resources for training , and lack of interpretability .", "in this paper , we propose a novel em framework that consists of heterogeneous information fusion ( hif ) and key attribute tree ( kat )", "induction to decouple feature representation from matching decision .", "using self - supervised learning and mask mechanism in pre - trained language modeling , hif learns the embeddings of noisy attribute values by inter - attribute attention with unlabeled data .", "using a set of comparison features and a limited amount of annotated data , kat induction learns an efficient decision tree that can be interpreted by generating entity matching rules whose structure is advocated by domain experts .", "experiments on 6 public 
datasets and 3 industrial datasets show that our method is highly efficient and outperforms sota em models in most cases .", "we will release the codes upon acceptance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "entity matching", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "matching"], "offsets": [0, 1]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "neural em models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "em", "models"], "offsets": [19, 20, 21]}, {"text": "vector representation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["vector", "representation"], "offsets": [23, 24]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [22]}}, {"event_type": "RWS", "arguments": [{"text": "neural em models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["neural", "em", "models"], "offsets": [19, 20, 21]}, {"text": "entities", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["entities"], "offsets": [30]}, {"text": "end - to - end", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["end", "-", "to", "-", "end"], "offsets": [31, 32, 33, 34, 35]}], "trigger": {"text": "match", "tokens": ["match"], "offsets": [29]}}, {"event_type": "RWF", "arguments": [{"text": "many annotated resources", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["many", "annotated", "resources"], "offsets": [43, 44, 45]}, {"text": "training", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["training"], "offsets": [47]}, {"text": "lack of interpretability", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", "of", "interpretability"], "offsets": [50, 51, 52]}, {"text": "neural em models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "em", "models"], "offsets": [19, 20, 21]}], "trigger": {"text": "require", 
"tokens": ["require"], "offsets": [42]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "em framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["entity", "matching", "framework"], "offsets": [0, 1, 63]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "using self - supervised learning and mask mechanism in pre - trained language modeling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "self", "-", "supervised", "learning", "and", "mask", "mechanism", "in", "pre", "-", "trained", "language", "modeling"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102]}, {"text": "heterogeneous information fusion", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["heterogeneous", "information", "fusion"], "offsets": [67, 68, 69]}, {"text": "embeddings of noisy attribute values", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["embeddings", "of", "noisy", "attribute", "values"], "offsets": [107, 108, 109, 110, 111]}, {"text": "inter - attribute attention", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["inter", "-", "attribute", "attention"], "offsets": [113, 114, 115, 116]}, {"text": "unlabeled data", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["unlabeled", "data"], "offsets": [118, 119]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [105]}}, {"event_type": "MDS", "arguments": [{"text": "entity matching rules", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["entity", "matching", "rules"], "offsets": [148, 149, 150]}, {"text": "efficient decision tree", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["efficient", "decision", "tree"], "offsets": [139, 140, 141]}], "trigger": {"text": "interpreted", "tokens": 
["interpreted"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "6 public datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["6", "public", "datasets"], "offsets": [161, 162, 163]}, {"text": "em framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["entity", "matching", "framework"], "offsets": [0, 1, 63]}, {"text": "3 industrial datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["3", "industrial", "datasets"], "offsets": [165, 166, 167]}], "trigger": {"text": "highly efficient", "tokens": ["highly", "efficient"], "offsets": [173, 174]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [176]}, {"text": "sota em models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["sota", "entity", "matching", "models"], "offsets": [177, 0, 1, 179]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [176]}}], "document": ["entity", "matching", "(", "em", ")", "aims", "at", "recognizing", "entity", "records", "that", "denote", "the", "same", "real", "-", "world", "object", ".", "neural", "em", "models", "learn", "vector", "representation", "of", "entity", "descriptions", "and", "match", "entities", "end", "-", "to", "-", "end", ".", "though", "robust", ",", "these", "methods", "require", "many", "annotated", "resources", "for", "training", ",", "and", "lack", "of", "interpretability", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "em", "framework", "that", "consists", "of", "heterogeneous", "information", "fusion", "(", "hif", ")", "and", "key", "attribute", "tree", "(", "kat", ")", "induction", "to", "decouple", "feature", "representation", "from", "matching", "decision", ".", "using", "self", "-", "supervised", "learning", "and", "mask", "mechanism", "in", "pre", "-", "trained", "language", "modeling", ",", "hif", "learns", "the", "embeddings", "of", 
"noisy", "attribute", "values", "by", "inter", "-", "attribute", "attention", "with", "unlabeled", "data", ".", "using", "a", "set", "of", "comparison", "features", "and", "a", "limited", "amount", "of", "annotated", "data", ",", "kat", "induction", "learns", "an", "efficient", "decision", "tree", "that", "can", "be", "interpreted", "by", "generating", "entity", "matching", "rules", "whose", "structure", "is", "advocated", "by", "domain", "experts", ".", "experiments", "on", "6", "public", "datasets", "and", "3", "industrial", "datasets", "show", "that", "our", "method", "is", "highly", "efficient", "and", "outperforms", "sota", "em", "models", "in", "most", "cases", ".", "we", "will", "release", "the", "codes", "upon", "acceptance", "."]}, {"venue": "ACL", "title": "Are Red Roses Red? Evaluating Consistency of Question-Answering Models", "abstract": "Although current evaluation of question-answering systems treats predictions in isolation, we need to consider the relationship between predictions to measure true understanding. A model should be penalized for answering \u201cno\u201d to \u201cIs the rose red?\u201d if it answers \u201cred\u201d to \u201cWhat color is the rose?\u201d. We propose a method to automatically extract such implications for instances from two QA datasets, VQA and SQuAD, which we then use to evaluate the consistency of models. Human evaluation shows these generated implications are well formed and valid. 
Consistency evaluation provides crucial insights into gaps in existing models, while retraining with implication-augmented data improves consistency on both synthetic and human-generated implications.", "doc_id": "f274547359b70b603db8efe66a4b1ea3", "publication_year": 2019, "sentences": ["although current evaluation of question - answering systems treats predictions in isolation , we need to consider the relationship between predictions to measure true understanding .", "a model should be penalized for answering \u201c no \u201d to \u201c is the rose red ? \u201d if it answers \u201c red \u201d to \u201c what color is the rose ? \u201d .", "we propose a method to automatically extract such implications for instances from two qa datasets , vqa and squad , which we then use to evaluate the consistency of models .", "human evaluation shows these generated implications are well formed and valid .", "consistency evaluation provides crucial insights into gaps in existing models , while retraining with implication - augmented data improves consistency on both synthetic and human - generated implications ."], "events": [{"event_type": "ITT", "arguments": [{"text": "question - answering systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["question", "-", "answering", "systems"], "offsets": [4, 5, 6, 7]}], "trigger": {"text": "treats", "tokens": ["treats"], "offsets": [8]}}, {"event_type": "MDS", "arguments": [{"text": "such implications", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["such", "implications"], "offsets": [67, 68]}, {"text": "instances", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["instances"], "offsets": [70]}, {"text": "from two qa datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "two", "qa", "datasets"], "offsets": [71, 72, 73, 74]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "vqa and squad", 
"nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["vqa", "and", "squad"], "offsets": [76, 77, 78]}, {"text": "consistency of models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["consistency", "of", "models"], "offsets": [87, 88, 89]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [85]}}, {"event_type": "FAC", "arguments": [{"text": "generated implications", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["generated", "implications"], "offsets": [95, 96]}], "trigger": {"text": "well formed and valid", "tokens": ["well", "formed", "and", "valid"], "offsets": [98, 99, 100, 101]}}, {"event_type": "FAC", "arguments": [{"text": "consistency evaluation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["consistency", "evaluation"], "offsets": [103, 104]}, {"text": "crucial insights", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["crucial", "insights"], "offsets": [106, 107]}, {"text": "gaps in existing models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["gaps", "in", "existing", "models"], "offsets": [109, 110, 111, 112]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [105]}}, {"event_type": "FAC", "arguments": [{"text": "retraining with implication - augmented data", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["retraining", "with", "implication", "-", "augmented", "data"], "offsets": [115, 116, 117, 118, 119, 120]}, {"text": "consistency", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["consistency"], "offsets": [122]}, {"text": "synthetic implications", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["synthetic", "implications"], "offsets": [125, 130]}, {"text": "human - generated implications", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["human", "-", "generated", "implications"], "offsets": [127, 128, 129, 130]}], "trigger": {"text": "improves", "tokens": ["improves"], 
"offsets": [121]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [60]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [63]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [61]}}], "document": ["although", "current", "evaluation", "of", "question", "-", "answering", "systems", "treats", "predictions", "in", "isolation", ",", "we", "need", "to", "consider", "the", "relationship", "between", "predictions", "to", "measure", "true", "understanding", ".", "a", "model", "should", "be", "penalized", "for", "answering", "\u201c", "no", "\u201d", "to", "\u201c", "is", "the", "rose", "red", "?", "\u201d", "if", "it", "answers", "\u201c", "red", "\u201d", "to", "\u201c", "what", "color", "is", "the", "rose", "?", "\u201d", ".", "we", "propose", "a", "method", "to", "automatically", "extract", "such", "implications", "for", "instances", "from", "two", "qa", "datasets", ",", "vqa", "and", "squad", ",", "which", "we", "then", "use", "to", "evaluate", "the", "consistency", "of", "models", ".", "human", "evaluation", "shows", "these", "generated", "implications", "are", "well", "formed", "and", "valid", ".", "consistency", "evaluation", "provides", "crucial", "insights", "into", "gaps", "in", "existing", "models", ",", "while", "retraining", "with", "implication", "-", "augmented", "data", "improves", "consistency", "on", "both", "synthetic", "and", "human", "-", "generated", "implications", "."]}, {"venue": "ACL", "title": "ZeroShotCeres: Zero-Shot Relation Extraction from Semi-Structured Webpages", "abstract": "In many documents, such as semi-structured webpages, textual semantics are augmented with additional information conveyed using visual elements including layout, font size, and color. 
Prior work on information extraction from semi-structured websites has required learning an extraction model specific to a given template via either manually labeled or distantly supervised data from that template. In this work, we propose a solution for \u201czero-shot\u201d open-domain relation extraction from webpages with a previously unseen template, including from websites with little overlap with existing sources of knowledge for distant supervision and websites in entirely new subject verticals. Our model uses a graph neural network-based approach to build a rich representation of text fields on a webpage and the relationships between them, enabling generalization to new templates. Experiments show this approach provides a 31% F1 gain over a baseline for zero-shot extraction in a new subject vertical.", "doc_id": "2b99d5895357979bb1667807876b986f", "publication_year": 2020, "sentences": ["in many documents , such as semi - structured webpages , textual semantics are augmented with additional information conveyed using visual elements including layout , font size , and color .", "prior work on information extraction from semi - structured websites has required learning an extraction model specific to a given template via either manually labeled or distantly supervised data from that template .", "in this work , we propose a solution for \u201c zero - shot \u201d open - domain relation extraction from webpages with a previously unseen template , including from websites with little overlap with existing sources of knowledge for distant supervision and websites in entirely new subject verticals .", "our model uses a graph neural network - based approach to build a rich representation of text fields on a webpage and the relationships between them , enabling generalization to new templates .", "experiments show this approach provides a 31 % f1 gain over a baseline for zero - shot extraction in a new subject vertical ."], "events": [{"event_type": "RWS", 
"arguments": [{"text": "textual semantics", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["textual", "semantics"], "offsets": [11, 12]}, {"text": "layout", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["layout"], "offsets": [23]}, {"text": "font size", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["font", "size"], "offsets": [25, 26]}, {"text": "color", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["color"], "offsets": [29]}, {"text": "additional information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["additional", "information"], "offsets": [16, 17]}], "trigger": {"text": "augmented", "tokens": ["augmented"], "offsets": [14]}}, {"event_type": "RWS", "arguments": [{"text": "extraction model specific to a given template", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["extraction", "model", "specific", "to", "a", "given", "template"], "offsets": [45, 46, 47, 48, 49, 50, 51]}, {"text": "either manually labeled or distantly supervised data", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["either", "manually", "labeled", "or", "distantly", "supervised", "data"], "offsets": [53, 54, 55, 56, 57, 58, 59]}, {"text": "prior work on information extraction from semi - structured websites", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prior", "work", "on", "information", "extraction", "from", "semi", "-", "structured", "websites"], "offsets": [31, 32, 33, 34, 35, 36, 37, 38, 39, 40]}], "trigger": {"text": "learning", "tokens": ["learning"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "solution for \u201c zero - shot \u201d open - domain relation extraction", "nugget_type": "APP", "argument_type": "Content", "tokens": ["solution", "for", "\u201c", "zero", "-", "shot", "\u201d", "open", "-", 
"domain", "relation", "extraction"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "graph neural network - based approach", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["graph", "neural", "network", "-", "based", "approach"], "offsets": [117, 118, 119, 120, 121, 122]}, {"text": "build", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["build"], "offsets": [124]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "rich representation of text fields on a webpage", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["rich", "representation", "of", "text", "fields", "on", "a", "webpage"], "offsets": [126, 127, 128, 129, 130, 131, 132, 133]}, {"text": "relationships between them", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["relationships", "between", "rich", "representation", "of", "text", "fields", "on", "a", "webpage"], "offsets": [136, 137, 126, 127, 128, 129, 130, 131, 132, 133]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "new templates", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["new", "templates"], "offsets": [143, 144]}], "trigger": {"text": "generalization", "tokens": ["generalization"], "offsets": [141]}}, {"event_type": "FIN", "arguments": [{"text": "provides", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["provides"], "offsets": [150]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [147]}}, {"event_type": "CMP", "arguments": [{"text": "graph neural network - based approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["graph", "neural", "network", "-", "based", "approach"], "offsets": [117, 118, 119, 120, 121, 122]}, {"text": "baseline for zero - shot extraction", 
"nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "for", "zero", "-", "shot", "extraction"], "offsets": [158, 159, 160, 161, 162, 163]}, {"text": "in a new subject vertical", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "new", "subject", "vertical"], "offsets": [164, 165, 166, 167, 168]}, {"text": "31 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["31", "%"], "offsets": [152, 153]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [154]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [150]}}], "document": ["in", "many", "documents", ",", "such", "as", "semi", "-", "structured", "webpages", ",", "textual", "semantics", "are", "augmented", "with", "additional", "information", "conveyed", "using", "visual", "elements", "including", "layout", ",", "font", "size", ",", "and", "color", ".", "prior", "work", "on", "information", "extraction", "from", "semi", "-", "structured", "websites", "has", "required", "learning", "an", "extraction", "model", "specific", "to", "a", "given", "template", "via", "either", "manually", "labeled", "or", "distantly", "supervised", "data", "from", "that", "template", ".", "in", "this", "work", ",", "we", "propose", "a", "solution", "for", "\u201c", "zero", "-", "shot", "\u201d", "open", "-", "domain", "relation", "extraction", "from", "webpages", "with", "a", "previously", "unseen", "template", ",", "including", "from", "websites", "with", "little", "overlap", "with", "existing", "sources", "of", "knowledge", "for", "distant", "supervision", "and", "websites", "in", "entirely", "new", "subject", "verticals", ".", "our", "model", "uses", "a", "graph", "neural", "network", "-", "based", "approach", "to", "build", "a", "rich", "representation", "of", "text", "fields", "on", "a", "webpage", "and", "the", "relationships", "between", "them", ",", "enabling", "generalization", "to", "new", "templates", ".", 
"experiments", "show", "this", "approach", "provides", "a", "31", "%", "f1", "gain", "over", "a", "baseline", "for", "zero", "-", "shot", "extraction", "in", "a", "new", "subject", "vertical", "."]}, {"venue": "ACL", "title": "ParaDetox: Detoxification with Parallel Data", "abstract": "We present a novel pipeline for the collection of parallel data for the detoxification task. We collect non-toxic paraphrases for over 10,000 English toxic sentences. We also show that this pipeline can be used to distill a large existing corpus of paraphrases to get toxic-neutral sentence pairs. We release two parallel corpora which can be used for the training of detoxification models. To the best of our knowledge, these are the first parallel datasets for this task.We describe our pipeline in detail to make it fast to set up for a new language or domain, thus contributing to faster and easier development of new parallel resources.We train several detoxification models on the collected data and compare them with several baselines and state-of-the-art unsupervised approaches. We conduct both automatic and manual evaluations. All models trained on parallel data outperform the state-of-the-art unsupervised models by a large margin. 
This suggests that our novel datasets can boost the performance of detoxification systems.", "doc_id": "95dc3036d0e801e9dbcf7cba21ba7204", "publication_year": 2022, "sentences": ["we present a novel pipeline for the collection of parallel data for the detoxification task .", "we collect non - toxic paraphrases for over 10 , 000 english toxic sentences .", "we also show that this pipeline can be used to distill a large existing corpus of paraphrases to get toxic - neutral sentence pairs .", "we release two parallel corpora which can be used for the training of detoxification models .", "to the best of our knowledge , these are the first parallel datasets for this task .", "we describe our pipeline in detail to make it fast to set up for a new language or domain , thus contributing to faster and easier development of new parallel resources .", "we train several detoxification models on the collected data and compare them with several baselines and state - of - the - art unsupervised approaches .", "we conduct both automatic and manual evaluations .", "all models trained on parallel data outperform the state - of - the - art unsupervised models by a large margin .", "this suggests that our novel datasets can boost the performance of detoxification systems ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "pipeline", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pipeline"], "offsets": [4]}, {"text": "collection", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collection"], "offsets": [7]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "parallel data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["parallel", "data"], "offsets": [9, 10]}, {"text": "for the detoxification task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", 
"the", "detoxification", "task"], "offsets": [11, 12, 13, 14]}], "trigger": {"text": "collection", "tokens": ["collection"], "offsets": [7]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [16]}, {"text": "non - toxic paraphrases", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["non", "-", "toxic", "paraphrases"], "offsets": [18, 19, 20, 21]}, {"text": "10 , 000 english toxic sentences", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["10", ",", "000", "english", "toxic", "sentences"], "offsets": [24, 25, 26, 27, 28, 29]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [17]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [31]}, {"text": "distill", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["distill"], "offsets": [41]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [33]}}, {"event_type": "FAC", "arguments": [{"text": "pipeline", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pipeline"], "offsets": [36]}, {"text": "large existing corpus of paraphrases", "nugget_type": "DST", "argument_type": "Object", "tokens": ["large", "existing", "corpus", "of", "paraphrases"], "offsets": [43, 44, 45, 46, 47]}, {"text": "get", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["get"], "offsets": [49]}], "trigger": {"text": "distill", "tokens": ["distill"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "toxic - neutral sentence pairs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["toxic", "-", "neutral", "sentence", "pairs"], "offsets": [50, 51, 52, 53, 54]}], "trigger": {"text": "get", "tokens": ["get"], "offsets": [49]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [56]}, {"text": "two parallel 
corpora", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "parallel", "corpora"], "offsets": [58, 59, 60]}, {"text": "training", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["training"], "offsets": [67]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [57]}}, {"event_type": "PUR", "arguments": [{"text": "detoxification models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["detoxification", "models"], "offsets": [69, 70]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "pipeline", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pipeline"], "offsets": [92]}, {"text": "set up", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["set", "up"], "offsets": [100, 101]}, {"text": "contributing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["contributing"], "offsets": [110]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "new language", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["new", "language"], "offsets": [104, 105]}, {"text": "new domain", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["new", "domain"], "offsets": [104, 107]}], "trigger": {"text": "set up", "tokens": ["set", "up"], "offsets": [100, 101]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [121]}, {"text": "several detoxification models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "detoxification", "models"], "offsets": [123, 124, 125]}, {"text": "collected data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["collected", "data"], "offsets": [128, 129]}], "trigger": {"text": "train", "tokens": 
["train"], "offsets": [122]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [121]}, {"text": "several detoxification models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "detoxification", "models"], "offsets": [123, 124, 125]}, {"text": "several baselines and state - of - the - art unsupervised approaches", "nugget_type": "APP", "argument_type": "Content", "tokens": ["several", "baselines", "and", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "approaches"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [147]}, {"text": "automatic evaluations", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["automatic", "evaluations"], "offsets": [150, 153]}, {"text": "manual evaluations", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["manual", "evaluations"], "offsets": [152, 153]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [148]}}, {"event_type": "CMP", "arguments": [{"text": "all models trained on parallel data", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["all", "models", "trained", "on", "parallel", "data"], "offsets": [155, 156, 157, 158, 159, 160]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [161]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [161]}}, {"event_type": "FIN", "arguments": [{"text": "boost", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["boost"], "offsets": [184]}], "trigger": {"text": "suggests", "tokens": ["suggests"], "offsets": [178]}}, {"event_type": "FAC", "arguments": [{"text": "performance of detoxification systems", 
"nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "detoxification", "systems"], "offsets": [186, 187, 188, 189]}, {"text": "two parallel corpora", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["two", "parallel", "corpora"], "offsets": [58, 59, 60]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [184]}}, {"event_type": "PUR", "arguments": [{"text": "faster and easier development of new parallel resources", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["faster", "and", "easier", "development", "of", "new", "parallel", "resources"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119]}], "trigger": {"text": "contributing", "tokens": ["contributing"], "offsets": [110]}}], "document": ["we", "present", "a", "novel", "pipeline", "for", "the", "collection", "of", "parallel", "data", "for", "the", "detoxification", "task", ".", "we", "collect", "non", "-", "toxic", "paraphrases", "for", "over", "10", ",", "000", "english", "toxic", "sentences", ".", "we", "also", "show", "that", "this", "pipeline", "can", "be", "used", "to", "distill", "a", "large", "existing", "corpus", "of", "paraphrases", "to", "get", "toxic", "-", "neutral", "sentence", "pairs", ".", "we", "release", "two", "parallel", "corpora", "which", "can", "be", "used", "for", "the", "training", "of", "detoxification", "models", ".", "to", "the", "best", "of", "our", "knowledge", ",", "these", "are", "the", "first", "parallel", "datasets", "for", "this", "task", ".", "we", "describe", "our", "pipeline", "in", "detail", "to", "make", "it", "fast", "to", "set", "up", "for", "a", "new", "language", "or", "domain", ",", "thus", "contributing", "to", "faster", "and", "easier", "development", "of", "new", "parallel", "resources", ".", "we", "train", "several", "detoxification", "models", "on", "the", "collected", "data", "and", "compare", "them", "with", "several", "baselines", "and", "state", "-", "of", "-", "the", "-", "art", "unsupervised", 
"approaches", ".", "we", "conduct", "both", "automatic", "and", "manual", "evaluations", ".", "all", "models", "trained", "on", "parallel", "data", "outperform", "the", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "models", "by", "a", "large", "margin", ".", "this", "suggests", "that", "our", "novel", "datasets", "can", "boost", "the", "performance", "of", "detoxification", "systems", "."]}, {"venue": "ACL", "title": "End-to-End Neural Word Alignment Outperforms GIZA++", "abstract": "Word alignment was once a core unsupervised learning task in natural language processing because of its essential role in training statistical machine translation (MT) models. Although unnecessary for training neural MT models, word alignment still plays an important role in interactive applications of neural machine translation, such as annotation transfer and lexicon injection. While statistical MT methods have been replaced by neural approaches with superior performance, the twenty-year-old GIZA++ toolkit remains a key component of state-of-the-art word alignment systems. Prior work on neural word alignment has only been able to outperform GIZA++ by using its output during training. We present the first end-to-end neural word alignment method that consistently outperforms GIZA++ on three data sets. 
Our approach repurposes a Transformer model trained for supervised translation to also serve as an unsupervised word alignment model in a manner that is tightly integrated and does not affect translation quality.", "doc_id": "2ba6577c21970ea17fbad60397347c6c", "publication_year": 2020, "sentences": ["word alignment was once a core unsupervised learning task in natural language processing because of its essential role in training statistical machine translation ( mt ) models .", "although unnecessary for training neural mt models , word alignment still plays an important role in interactive applications of neural machine translation , such as annotation transfer and lexicon injection .", "while statistical mt methods have been replaced by neural approaches with superior performance , the twenty - year - old giza + + toolkit remains a key component of state - of - the - art word alignment systems .", "prior work on neural word alignment has only been able to outperform giza + + by using its output during training .", "we present the first end - to - end neural word alignment method that consistently outperforms giza + + on three data sets .", "our approach repurposes a transformer model trained for supervised translation to also serve as an unsupervised word alignment model in a manner that is tightly integrated and does not affect translation quality ."], "events": [{"event_type": "ITT", "arguments": [{"text": "word alignment", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "alignment"], "offsets": [0, 1]}, {"text": "in natural language processing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "natural", "language", "processing"], "offsets": [9, 10, 11, 12]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [8]}}, {"event_type": "RWS", "arguments": [{"text": "prior work on neural word alignment", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prior", "work", "on", "neural", "word", "alignment"], 
"offsets": [99, 100, 101, 102, 103, 104]}, {"text": "its output", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["giza", "+", "+", "output"], "offsets": [111, 112, 113, 117]}, {"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], "offsets": [118, 119]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [115]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [121]}, {"text": "first end - to - end neural word alignment method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["first", "end", "-", "to", "-", "end", "neural", "word", "alignment", "method"], "offsets": [124, 125, 126, 127, 128, 129, 130, 131, 132, 133]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [122]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [136]}, {"text": "giza + +", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["giza", "+", "+"], "offsets": [137, 138, 139]}, {"text": "three data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "data", "sets"], "offsets": [141, 142, 143]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [136]}}, {"event_type": "MDS", "arguments": [{"text": "transformer model trained for supervised translation", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["transformer", "model", "trained", "for", "supervised", "translation"], "offsets": [149, 150, 151, 152, 153, 154]}, {"text": "unsupervised word alignment model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["unsupervised", "word", "alignment", "model"], "offsets": [160, 161, 162, 163]}, {"text": "in a manner that is tightly integrated and does not affect translation quality", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["in", "a", "manner", "that", "is", "tightly", "integrated", "and", "does", "not", "affect", "translation", "quality"], "offsets": [164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176]}], "trigger": {"text": "repurposes", "tokens": ["repurposes"], "offsets": [147]}}], "document": ["word", "alignment", "was", "once", "a", "core", "unsupervised", "learning", "task", "in", "natural", "language", "processing", "because", "of", "its", "essential", "role", "in", "training", "statistical", "machine", "translation", "(", "mt", ")", "models", ".", "although", "unnecessary", "for", "training", "neural", "mt", "models", ",", "word", "alignment", "still", "plays", "an", "important", "role", "in", "interactive", "applications", "of", "neural", "machine", "translation", ",", "such", "as", "annotation", "transfer", "and", "lexicon", "injection", ".", "while", "statistical", "mt", "methods", "have", "been", "replaced", "by", "neural", "approaches", "with", "superior", "performance", ",", "the", "twenty", "-", "year", "-", "old", "giza", "+", "+", "toolkit", "remains", "a", "key", "component", "of", "state", "-", "of", "-", "the", "-", "art", "word", "alignment", "systems", ".", "prior", "work", "on", "neural", "word", "alignment", "has", "only", "been", "able", "to", "outperform", "giza", "+", "+", "by", "using", "its", "output", "during", "training", ".", "we", "present", "the", "first", "end", "-", "to", "-", "end", "neural", "word", "alignment", "method", "that", "consistently", "outperforms", "giza", "+", "+", "on", "three", "data", "sets", ".", "our", "approach", "repurposes", "a", "transformer", "model", "trained", "for", "supervised", "translation", "to", "also", "serve", "as", "an", "unsupervised", "word", "alignment", "model", "in", "a", "manner", "that", "is", "tightly", "integrated", "and", "does", "not", "affect", "translation", "quality", "."]}, {"venue": "ACL", "title": "Transformers to Learn Hierarchical Contexts in Multiparty 
Dialogue for Span-based Question Answering", "abstract": "We introduce a novel approach to transformers that learns hierarchical representations in multiparty dialogue. First, three language modeling tasks are used to pre-train the transformers, token- and utterance-level language modeling and utterance order prediction, that learn both token and utterance embeddings for better understanding in dialogue contexts. Then, multi-task learning between the utterance prediction and the token span prediction is applied to fine-tune for span-based question answering (QA). Our approach is evaluated on the FriendsQA dataset and shows improvements of 3.8% and 1.4% over the two state-of-the-art transformer models, BERT and RoBERTa, respectively.", "doc_id": "93c671a57c7ef31a7bc36a53c8ee0e01", "publication_year": 2020, "sentences": ["we introduce a novel approach to transformers that learns hierarchical representations in multiparty dialogue .", "first , three language modeling tasks are used to pre - train the transformers , token - and utterance - level language modeling and utterance order prediction , that learn both token and utterance embeddings for better understanding in dialogue contexts .", "then , multi - task learning between the utterance prediction and the token span prediction is applied to fine - tune for span - based question answering ( qa ) .", "our approach is evaluated on the friendsqa dataset and shows improvements of 3 . 8 % and 1 . 
4 % over the two state - of - the - art transformer models , bert and roberta , respectively ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "approach to transformers", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach", "to", "transformers"], "offsets": [4, 5, 6]}, {"text": "learns", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learns"], "offsets": [8]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "three language modeling tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["three", "language", "modeling", "tasks"], "offsets": [17, 18, 19, 20]}, {"text": "pre - train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["pre", "-", "train"], "offsets": [24, 25, 26]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "transformers", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["transformers"], "offsets": [28]}, {"text": "token - and utterance - level language modeling", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["token", "-", "and", "utterance", "-", "level", "language", "modeling"], "offsets": [30, 31, 32, 33, 34, 35, 36, 37]}, {"text": "utterance order prediction", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["utterance", "order", "prediction"], "offsets": [39, 40, 41]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [24, 25, 26]}}, {"event_type": "MDS", "arguments": [{"text": "better understanding in dialogue contexts", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["better", "understanding", "in", "dialogue", "contexts"], "offsets": [51, 52, 53, 54, 55]}, {"text": "token", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["token"], "offsets": 
[46]}, {"text": "utterance embeddings", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["utterance", "embeddings"], "offsets": [48, 49]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [44]}}, {"event_type": "WKS", "arguments": [{"text": "multi - task learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "task", "learning"], "offsets": [59, 60, 61, 62]}, {"text": "fine - tune", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fine", "-", "tune"], "offsets": [75, 76, 77]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "span - based question answering", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["span", "-", "based", "question", "answering"], "offsets": [79, 80, 81, 82, 83]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [75, 76, 77]}}, {"event_type": "CMP", "arguments": [{"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [98]}, {"text": "3 . 8 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["3", ".", "8", "%"], "offsets": [100, 101, 102, 103]}], "trigger": {"text": "improvements", "tokens": ["improvements"], "offsets": [98]}}, {"event_type": "CMP", "arguments": [{"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [98]}, {"text": "1 . 
4 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "4", "%"], "offsets": [105, 106, 107, 108]}, {"text": "roberta", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["roberta"], "offsets": [124]}], "trigger": {"text": "improvements", "tokens": ["improvements"], "offsets": [98]}}, {"event_type": "PUR", "arguments": [{"text": "hierarchical representations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["hierarchical", "representations"], "offsets": [9, 10]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [8]}}, {"event_type": "WKS", "arguments": [{"text": "approach to transformers", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach", "to", "transformers"], "offsets": [4, 5, 6]}, {"text": "friendsqa dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["friendsqa", "dataset"], "offsets": [94, 95]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [91]}}], "document": ["we", "introduce", "a", "novel", "approach", "to", "transformers", "that", "learns", "hierarchical", "representations", "in", "multiparty", "dialogue", ".", "first", ",", "three", "language", "modeling", "tasks", "are", "used", "to", "pre", "-", "train", "the", "transformers", ",", "token", "-", "and", "utterance", "-", "level", "language", "modeling", "and", "utterance", "order", "prediction", ",", "that", "learn", "both", "token", "and", "utterance", "embeddings", "for", "better", "understanding", "in", "dialogue", "contexts", ".", "then", ",", "multi", "-", "task", "learning", "between", "the", "utterance", "prediction", "and", "the", "token", "span", "prediction", "is", "applied", "to", "fine", "-", "tune", "for", "span", "-", "based", "question", "answering", "(", "qa", ")", ".", "our", "approach", "is", "evaluated", "on", "the", "friendsqa", "dataset", "and", "shows", "improvements", "of", "3", ".", "8", "%", "and", "1", ".", "4", "%", "over", "the", "two", "state", "-", "of", 
"-", "the", "-", "art", "transformer", "models", ",", "bert", "and", "roberta", ",", "respectively", "."]}, {"venue": "ACL", "title": "Recollection versus Imagination: Exploring Human Memory and Cognition via Neural Language Models", "abstract": "We investigate the use of NLP as a measure of the cognitive processes involved in storytelling, contrasting imagination and recollection of events. To facilitate this, we collect and release Hippocorpus, a dataset of 7,000 stories about imagined and recalled events. We introduce a measure of narrative flow and use this to examine the narratives for imagined and recalled events. Additionally, we measure the differential recruitment of knowledge attributed to semantic memory versus episodic memory (Tulving, 1972) for imagined and recalled storytelling by comparing the frequency of descriptions of general commonsense events with more specific realis events. Our analyses show that imagined stories have a substantially more linear narrative flow, compared to recalled stories in which adjacent sentences are more disconnected. In addition, while recalled stories rely more on autobiographical events based on episodic memory, imagined stories express more commonsense knowledge based on semantic memory. Finally, our measures reveal the effect of narrativization of memories in stories (e.g., stories about frequently recalled memories flow more linearly; Bartlett, 1932). 
Our findings highlight the potential of using NLP tools to study the traces of human cognition in language.", "doc_id": "47f62ea6200f15ab9fd85610e711901e", "publication_year": 2020, "sentences": ["we investigate the use of nlp as a measure of the cognitive processes involved in storytelling , contrasting imagination and recollection of events .", "to facilitate this , we collect and release hippocorpus , a dataset of 7 , 000 stories about imagined and recalled events .", "we introduce a measure of narrative flow and use this to examine the narratives for imagined and recalled events .", "additionally , we measure the differential recruitment of knowledge attributed to semantic memory versus episodic memory ( tulving , 1972 ) for imagined and recalled storytelling by comparing the frequency of descriptions of general commonsense events with more specific realis events .", "our analyses show that imagined stories have a substantially more linear narrative flow , compared to recalled stories in which adjacent sentences are more disconnected .", "in addition , while recalled stories rely more on autobiographical events based on episodic memory , imagined stories express more commonsense knowledge based on semantic memory .", "finally , our measures reveal the effect of narrativization of memories in stories ( e . g . 
, stories about frequently recalled memories flow more linearly ; bartlett , 1932 ) .", "our findings highlight the potential of using nlp tools to study the traces of human cognition in language ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "use of nlp", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["use", "of", "nlp"], "offsets": [3, 4, 5]}, {"text": "measure of the cognitive processes involved in storytelling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["measure", "of", "the", "cognitive", "processes", "involved", "in", "storytelling"], "offsets": [8, 9, 10, 11, 12, 13, 14, 15]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [28]}, {"text": "hippocorpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["hippocorpus"], "offsets": [32]}], "trigger": {"text": "collect and release", "tokens": ["collect", "and", "release"], "offsets": [29, 30, 31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [47]}, {"text": "examine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["examine"], "offsets": [58]}, {"text": "measure of narrative flow", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["measure", "of", "narrative", "flow"], "offsets": [50, 51, 52, 53]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [48]}}, {"event_type": "PUR", "arguments": [{"text": "narratives for imagined and recalled events", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["narratives", "for", "imagined", "and", "recalled", "events"], "offsets": [60, 61, 62, 63, 64, 65]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": 
[58]}}, {"event_type": "MDS", "arguments": [{"text": "more specific realis events", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["more", "specific", "realis", "events"], "offsets": [104, 105, 106, 107]}, {"text": "frequency of descriptions of general commonsense events", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["frequency", "of", "descriptions", "of", "general", "commonsense", "events"], "offsets": [96, 97, 98, 99, 100, 101, 102]}], "trigger": {"text": "comparing", "tokens": ["comparing"], "offsets": [94]}}, {"event_type": "FIN", "arguments": [{"text": "compared", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["compared"], "offsets": [123]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [111]}}, {"event_type": "CMP", "arguments": [{"text": "substantially more linear narrative flow", "nugget_type": "STR", "argument_type": "Result", "tokens": ["substantially", "more", "linear", "narrative", "flow"], "offsets": [117, 118, 119, 120, 121]}, {"text": "imagined stories", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["imagined", "stories"], "offsets": [113, 114]}, {"text": "recalled stories", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["recalled", "stories"], "offsets": [125, 126]}], "trigger": {"text": "compared", "tokens": ["compared"], "offsets": [123]}}, {"event_type": "CMP", "arguments": [{"text": "more commonsense knowledge", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "commonsense", "knowledge"], "offsets": [154, 155, 156]}, {"text": "based on semantic memory", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "semantic", "memory"], "offsets": [157, 158, 159, 160]}, {"text": "imagined stories", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["imagined", "stories"], "offsets": [151, 152]}, {"text": "recalled stories", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["recalled", "stories"], 
"offsets": [139, 140]}], "trigger": {"text": "express", "tokens": ["express"], "offsets": [153]}}, {"event_type": "FAC", "arguments": [{"text": "measure of narrative flow", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["measure", "of", "narrative", "flow"], "offsets": [50, 51, 52, 53]}, {"text": "effect of narrativization of memories", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effect", "of", "narrativization", "of", "memories"], "offsets": [168, 169, 170, 171, 172]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [166]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [69]}, {"text": "differential recruitment of knowledge", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["differential", "recruitment", "of", "knowledge"], "offsets": [72, 73, 74, 75]}, {"text": "imagined storytelling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["imagined", "storytelling"], "offsets": [89, 92]}, {"text": "recalled storytelling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["recalled", "storytelling"], "offsets": [91, 92]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [70]}}], "document": ["we", "investigate", "the", "use", "of", "nlp", "as", "a", "measure", "of", "the", "cognitive", "processes", "involved", "in", "storytelling", ",", "contrasting", "imagination", "and", "recollection", "of", "events", ".", "to", "facilitate", "this", ",", "we", "collect", "and", "release", "hippocorpus", ",", "a", "dataset", "of", "7", ",", "000", "stories", "about", "imagined", "and", "recalled", "events", ".", "we", "introduce", "a", "measure", "of", "narrative", "flow", "and", "use", "this", "to", "examine", "the", "narratives", "for", "imagined", "and", "recalled", "events", ".", "additionally", ",", "we", "measure", "the", "differential", "recruitment", "of", "knowledge", "attributed", "to", 
"semantic", "memory", "versus", "episodic", "memory", "(", "tulving", ",", "1972", ")", "for", "imagined", "and", "recalled", "storytelling", "by", "comparing", "the", "frequency", "of", "descriptions", "of", "general", "commonsense", "events", "with", "more", "specific", "realis", "events", ".", "our", "analyses", "show", "that", "imagined", "stories", "have", "a", "substantially", "more", "linear", "narrative", "flow", ",", "compared", "to", "recalled", "stories", "in", "which", "adjacent", "sentences", "are", "more", "disconnected", ".", "in", "addition", ",", "while", "recalled", "stories", "rely", "more", "on", "autobiographical", "events", "based", "on", "episodic", "memory", ",", "imagined", "stories", "express", "more", "commonsense", "knowledge", "based", "on", "semantic", "memory", ".", "finally", ",", "our", "measures", "reveal", "the", "effect", "of", "narrativization", "of", "memories", "in", "stories", "(", "e", ".", "g", ".", ",", "stories", "about", "frequently", "recalled", "memories", "flow", "more", "linearly", ";", "bartlett", ",", "1932", ")", ".", "our", "findings", "highlight", "the", "potential", "of", "using", "nlp", "tools", "to", "study", "the", "traces", "of", "human", "cognition", "in", "language", "."]}, {"venue": "ACL", "title": "SpanMlt: A Span-based Multi-Task Learning Framework for Pair-wise Aspect and Opinion Terms Extraction", "abstract": "Aspect terms extraction and opinion terms extraction are two key problems of fine-grained Aspect Based Sentiment Analysis (ABSA). The aspect-opinion pairs can provide a global profile about a product or service for consumers and opinion mining systems. However, traditional methods can not directly output aspect-opinion pairs without given aspect terms or opinion terms. Although some recent co-extraction methods have been proposed to extract both terms jointly, they fail to extract them as pairs. 
To this end, this paper proposes an end-to-end method to solve the task of Pair-wise Aspect and Opinion Terms Extraction (PAOTE). Furthermore, this paper treats the problem from a perspective of joint term and relation extraction rather than under the sequence tagging formulation performed in most prior works. We propose a multi-task learning framework based on shared spans, where the terms are extracted under the supervision of span boundaries. Meanwhile, the pair-wise relations are jointly identified using the span representations. Extensive experiments show that our model consistently outperforms state-of-the-art methods.", "doc_id": "8311ef9d0ec81152dc6b27b619f8fcab", "publication_year": 2020, "sentences": ["aspect terms extraction and opinion terms extraction are two key problems of fine - grained aspect based sentiment analysis ( absa ) .", "the aspect - opinion pairs can provide a global profile about a product or service for consumers and opinion mining systems .", "however , traditional methods can not directly output aspect - opinion pairs without given aspect terms or opinion terms .", "although some recent co - extraction methods have been proposed to extract both terms jointly , they fail to extract them as pairs .", "to this end , this paper proposes an end - to - end method to solve the task of pair - wise aspect and opinion terms extraction ( paote ) .", "furthermore , this paper treats the problem from a perspective of joint term and relation extraction rather than under the sequence tagging formulation performed in most prior works .", "we propose a multi - task learning framework based on shared spans , where the terms are extracted under the supervision of span boundaries .", "meanwhile , the pair - wise relations are jointly identified using the span representations .", "extensive experiments show that our model consistently outperforms state - of - the - art methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "aspect terms 
extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "terms", "extraction"], "offsets": [0, 1, 2]}, {"text": "opinion terms extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["opinion", "terms", "extraction"], "offsets": [4, 5, 6]}], "trigger": {"text": "problems", "tokens": ["problems"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "traditional methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["traditional", "methods"], "offsets": [47, 48]}, {"text": "aspect - opinion pairs", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["aspect", "-", "opinion", "pairs"], "offsets": [53, 54, 55, 56]}, {"text": "without given aspect terms or opinion terms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "given", "aspect", "terms", "or", "opinion", "terms"], "offsets": [57, 58, 59, 60, 61, 62, 63]}], "trigger": {"text": "not directly output", "tokens": ["not", "directly", "output"], "offsets": [50, 51, 52]}}, {"event_type": "RWF", "arguments": [{"text": "recent co - extraction methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["recent", "co", "-", "extraction", "methods"], "offsets": [67, 68, 69, 70, 71]}, {"text": "aspect - opinion pairs", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["aspect", "-", "opinion", "pairs"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "fail to extract", "tokens": ["fail", "to", "extract"], "offsets": [82, 83, 84]}}, {"event_type": "PRP", "arguments": [{"text": "end - to - end method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["end", "-", "to", "-", "end", "method"], "offsets": [97, 98, 99, 100, 101, 102]}, {"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [104]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [95]}}, {"event_type": "PUR", "arguments": [{"text": "pair - wise aspect and opinion 
terms extraction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["pair", "-", "wise", "aspect", "and", "opinion", "terms", "extraction"], "offsets": [108, 109, 110, 111, 112, 113, 114, 115]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [104]}}, {"event_type": "WKS", "arguments": [{"text": "pair - wise aspect and opinion terms extraction", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["pair", "-", "wise", "aspect", "and", "opinion", "terms", "extraction"], "offsets": [108, 109, 110, 111, 112, 113, 114, 115]}, {"text": "from a perspective of joint term and relation extraction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "perspective", "of", "joint", "term", "and", "relation", "extraction"], "offsets": [127, 128, 129, 130, 131, 132, 133, 134, 135]}], "trigger": {"text": "treats", "tokens": ["treats"], "offsets": [124]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [149]}, {"text": "multi - task learning framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "task", "learning", "framework"], "offsets": [152, 153, 154, 155, 156]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [150]}}, {"event_type": "MDS", "arguments": [{"text": "pair - wise relations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["pair", "-", "wise", "relations"], "offsets": [177, 178, 179, 180]}, {"text": "span representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["span", "representations"], "offsets": [186, 187]}], "trigger": {"text": "jointly identified", "tokens": ["jointly", "identified"], "offsets": [182, 183]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [196]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": 
[191]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [196]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [197, 198, 199, 200, 201, 202, 203, 204]}, {"text": "consistently", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["consistently"], "offsets": [195]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [196]}}], "document": ["aspect", "terms", "extraction", "and", "opinion", "terms", "extraction", "are", "two", "key", "problems", "of", "fine", "-", "grained", "aspect", "based", "sentiment", "analysis", "(", "absa", ")", ".", "the", "aspect", "-", "opinion", "pairs", "can", "provide", "a", "global", "profile", "about", "a", "product", "or", "service", "for", "consumers", "and", "opinion", "mining", "systems", ".", "however", ",", "traditional", "methods", "can", "not", "directly", "output", "aspect", "-", "opinion", "pairs", "without", "given", "aspect", "terms", "or", "opinion", "terms", ".", "although", "some", "recent", "co", "-", "extraction", "methods", "have", "been", "proposed", "to", "extract", "both", "terms", "jointly", ",", "they", "fail", "to", "extract", "them", "as", "pairs", ".", "to", "this", "end", ",", "this", "paper", "proposes", "an", "end", "-", "to", "-", "end", "method", "to", "solve", "the", "task", "of", "pair", "-", "wise", "aspect", "and", "opinion", "terms", "extraction", "(", "paote", ")", ".", "furthermore", ",", "this", "paper", "treats", "the", "problem", "from", "a", "perspective", "of", "joint", "term", "and", "relation", "extraction", "rather", "than", "under", "the", "sequence", "tagging", "formulation", "performed", "in", "most", "prior", "works", ".", "we", "propose", "a", "multi", "-", "task", "learning", "framework", "based", "on", "shared", "spans", ",", "where", 
"the", "terms", "are", "extracted", "under", "the", "supervision", "of", "span", "boundaries", ".", "meanwhile", ",", "the", "pair", "-", "wise", "relations", "are", "jointly", "identified", "using", "the", "span", "representations", ".", "extensive", "experiments", "show", "that", "our", "model", "consistently", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "."]}, {"venue": "ACL", "title": "Adaptive Attention Span in Transformers", "abstract": "We propose a novel self-attention mechanism that can learn its optimal attention span. This allows us to extend significantly the maximum context size used in Transformer, while maintaining control over their memory footprint and computational time. We show the effectiveness of our approach on the task of character level language modeling, where we achieve state-of-the-art performances on text8 and enwiki8 by using a maximum context of 8k characters.", "doc_id": "4dfc16c44cae914e158a49ce45b56833", "publication_year": 2019, "sentences": ["we propose a novel self - attention mechanism that can learn its optimal attention span .", "this allows us to extend significantly the maximum context size used in transformer , while maintaining control over their memory footprint and computational time .", "we show the effectiveness of our approach on the task of character level language modeling , where we achieve state - of - the - art performances on text8 and enwiki8 by using a maximum context of 8k characters ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [10]}, {"text": "self - attention mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["self", "-", "attention", "mechanism"], "offsets": [4, 5, 6, 7]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, 
{"event_type": "PUR", "arguments": [{"text": "optimal attention span", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["optimal", "attention", "span"], "offsets": [12, 13, 14]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [10]}}, {"event_type": "MDS", "arguments": [{"text": "maximum context size", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["maximum", "context", "size"], "offsets": [23, 24, 25]}, {"text": "in transformer", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "transformer"], "offsets": [27, 28]}], "trigger": {"text": "extend", "tokens": ["extend"], "offsets": [20]}}, {"event_type": "MDS", "arguments": [{"text": "over their memory footprint and computational time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "their", "memory", "footprint", "and", "computational", "time"], "offsets": [33, 34, 35, 36, 37, 38, 39]}, {"text": "control", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["control"], "offsets": [32]}], "trigger": {"text": "maintaining", "tokens": ["maintaining"], "offsets": [31]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of our approach", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["effectiveness", "of", "our", "approach"], "offsets": [44, 45, 46, 47]}, {"text": "on the task of character level language modeling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "task", "of", "character", "level", "language", "modeling"], "offsets": [48, 49, 50, 51, 52, 53, 54, 55]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [42]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performances", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performances"], "offsets": [60, 61, 62, 63, 64, 65, 66, 67]}, {"text": "text8", "nugget_type": "DST", "argument_type": "Dataset", 
"tokens": ["text8"], "offsets": [69]}, {"text": "enwiki8", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["enwiki8"], "offsets": [71]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [59]}}], "document": ["we", "propose", "a", "novel", "self", "-", "attention", "mechanism", "that", "can", "learn", "its", "optimal", "attention", "span", ".", "this", "allows", "us", "to", "extend", "significantly", "the", "maximum", "context", "size", "used", "in", "transformer", ",", "while", "maintaining", "control", "over", "their", "memory", "footprint", "and", "computational", "time", ".", "we", "show", "the", "effectiveness", "of", "our", "approach", "on", "the", "task", "of", "character", "level", "language", "modeling", ",", "where", "we", "achieve", "state", "-", "of", "-", "the", "-", "art", "performances", "on", "text8", "and", "enwiki8", "by", "using", "a", "maximum", "context", "of", "8k", "characters", "."]}, {"venue": "ACL", "title": "Interpreting Twitter User Geolocation", "abstract": "Identifying user geolocation in online social networks is an essential task in many location-based applications. Existing methods rely on the similarity of text and network structure, however, they suffer from a lack of interpretability on the corresponding results, which is crucial for understanding model behavior. In this work, we adopt influence functions to interpret the behavior of GNN-based models by identifying the importance of training users when predicting the locations of the testing users. This methodology helps with providing meaningful explanations on prediction results. 
Furthermore, it also initiates an attempt to uncover the so-called \u201cblack-box\u201d GNN-based models by investigating the effect of individual nodes.", "doc_id": "453741a5d97c54352ff383ed3c4d8804", "publication_year": 2020, "sentences": ["identifying user geolocation in online social networks is an essential task in many location - based applications .", "existing methods rely on the similarity of text and network structure , however , they suffer from a lack of interpretability on the corresponding results , which is crucial for understanding model behavior .", "in this work , we adopt influence functions to interpret the behavior of gnn - based models by identifying the importance of training users when predicting the locations of the testing users .", "this methodology helps with providing meaningful explanations on prediction results .", "furthermore , it also initiates an attempt to uncover the so - called \u201c black - box \u201d gnn - based models by investigating the effect of individual nodes ."], "events": [{"event_type": "ITT", "arguments": [{"text": "identifying user geolocation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["identifying", "user", "geolocation"], "offsets": [0, 1, 2]}, {"text": "in online social networks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "online", "social", "networks"], "offsets": [3, 4, 5, 6]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [18, 19]}, {"text": "suffer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer"], "offsets": [33]}, {"text": "lack of interpretability", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", "of", "interpretability"], "offsets": [36, 37, 38]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [33]}}, {"event_type": 
"WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [56]}, {"text": "influence functions", "nugget_type": "APP", "argument_type": "Content", "tokens": ["influence", "functions"], "offsets": [58, 59]}, {"text": "interpret", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["interpret"], "offsets": [61]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [57]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [56]}, {"text": "importance of training users", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["importance", "of", "training", "users"], "offsets": [72, 73, 74, 75]}, {"text": "when predicting the locations of the testing users", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "predicting", "the", "locations", "of", "the", "testing", "users"], "offsets": [76, 77, 78, 79, 80, 81, 82, 83]}, {"text": "interpret", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["interpret"], "offsets": [61]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [70]}}, {"event_type": "PUR", "arguments": [{"text": "behavior of gnn - based models", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["behavior", "of", "gnn", "-", "based", "models"], "offsets": [63, 64, 65, 66, 67, 68]}], "trigger": {"text": "interpret", "tokens": ["interpret"], "offsets": [61]}}, {"event_type": "WKS", "arguments": [{"text": "effect of individual nodes", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effect", "of", "individual", "nodes"], "offsets": [121, 122, 123, 124]}, {"text": "uncover", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["uncover"], "offsets": [104]}], "trigger": {"text": "investigating", "tokens": ["investigating"], "offsets": [119]}}, {"event_type": "PUR", "arguments": [{"text": "\u201c black - box \u201d gnn 
- based models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["\u201c", "black", "-", "box", "\u201d", "gnn", "-", "based", "models"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "uncover", "tokens": ["uncover"], "offsets": [104]}}], "document": ["identifying", "user", "geolocation", "in", "online", "social", "networks", "is", "an", "essential", "task", "in", "many", "location", "-", "based", "applications", ".", "existing", "methods", "rely", "on", "the", "similarity", "of", "text", "and", "network", "structure", ",", "however", ",", "they", "suffer", "from", "a", "lack", "of", "interpretability", "on", "the", "corresponding", "results", ",", "which", "is", "crucial", "for", "understanding", "model", "behavior", ".", "in", "this", "work", ",", "we", "adopt", "influence", "functions", "to", "interpret", "the", "behavior", "of", "gnn", "-", "based", "models", "by", "identifying", "the", "importance", "of", "training", "users", "when", "predicting", "the", "locations", "of", "the", "testing", "users", ".", "this", "methodology", "helps", "with", "providing", "meaningful", "explanations", "on", "prediction", "results", ".", "furthermore", ",", "it", "also", "initiates", "an", "attempt", "to", "uncover", "the", "so", "-", "called", "\u201c", "black", "-", "box", "\u201d", "gnn", "-", "based", "models", "by", "investigating", "the", "effect", "of", "individual", "nodes", "."]}, {"venue": "ACL", "title": "Simple, Interpretable and Stable Method for Detecting Words with Usage Change across Corpora", "abstract": "The problem of comparing two bodies of text and searching for words that differ in their usage between them arises often in digital humanities and computational social science. This is commonly approached by training word embeddings on each corpus, aligning the vector spaces, and looking for words whose cosine distance in the aligned space is large. 
However, these methods often require extensive filtering of the vocabulary to perform well, and - as we show in this work - result in unstable, and hence less reliable, results. We propose an alternative approach that does not use vector space alignment, and instead considers the neighbors of each word. The method is simple, interpretable and stable. We demonstrate its effectiveness in 9 different setups, considering different corpus splitting criteria (age, gender and profession of tweet authors, time of tweet) and different languages (English, French and Hebrew).", "doc_id": "dd652f456cc5a0bf77407244bf86d40f", "publication_year": 2020, "sentences": ["the problem of comparing two bodies of text and searching for words that differ in their usage between them arises often in digital humanities and computational social science .", "this is commonly approached by training word embeddings on each corpus , aligning the vector spaces , and looking for words whose cosine distance in the aligned space is large .", "however , these methods often require extensive filtering of the vocabulary to perform well , and - as we show in this work - result in unstable , and hence less reliable , results .", "we propose an alternative approach that does not use vector space alignment , and instead considers the neighbors of each word .", "the method is simple , interpretable and stable .", "we demonstrate its effectiveness in 9 different setups , considering different corpus splitting criteria ( age , gender and profession of tweet authors , time of tweet ) and different languages ( english , french and hebrew ) ."], "events": [{"event_type": "RWS", "arguments": [{"text": "word embeddings", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["word", "embeddings"], "offsets": [35, 36]}, {"text": "vector spaces", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["vector", "spaces"], "offsets": [43, 44]}, {"text": "words whose cosine distance in the 
aligned space is large", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["words", "whose", "cosine", "distance", "in", "the", "aligned", "space", "is", "large"], "offsets": [49, 50, 51, 52, 53, 54, 55, 56, 57, 58]}], "trigger": {"text": "approached", "tokens": ["approached"], "offsets": [32]}}, {"event_type": "RWF", "arguments": [{"text": "extensive filtering of the vocabulary", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["extensive", "filtering", "of", "the", "vocabulary"], "offsets": [66, 67, 68, 69, 70]}, {"text": "methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["methods"], "offsets": [63]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [65]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [95]}, {"text": "alternative approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alternative", "approach"], "offsets": [98, 99]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [96]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [126]}, {"text": "effectiveness", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effectiveness"], "offsets": [129]}, {"text": "in 9 different setups", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "9", "different", "setups"], "offsets": [130, 131, 132, 133]}, {"text": "considering different corpus splitting criteria ( age , gender and profession of tweet authors , time of tweet ) and different languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["considering", "different", "corpus", "splitting", "criteria", "and", "different", "languages"], "offsets": [135, 136, 137, 138, 139, 154, 155, 156]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [127]}}, {"event_type": "RWF", "arguments": 
[{"text": "methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["methods"], "offsets": [63]}, {"text": "unstable , and hence less reliable , results", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unstable", "and", "less", "reliable", ",", "results"], "offsets": [86, 88, 90, 91, 92, 93]}], "trigger": {"text": "result", "tokens": ["result"], "offsets": [84]}}], "document": ["the", "problem", "of", "comparing", "two", "bodies", "of", "text", "and", "searching", "for", "words", "that", "differ", "in", "their", "usage", "between", "them", "arises", "often", "in", "digital", "humanities", "and", "computational", "social", "science", ".", "this", "is", "commonly", "approached", "by", "training", "word", "embeddings", "on", "each", "corpus", ",", "aligning", "the", "vector", "spaces", ",", "and", "looking", "for", "words", "whose", "cosine", "distance", "in", "the", "aligned", "space", "is", "large", ".", "however", ",", "these", "methods", "often", "require", "extensive", "filtering", "of", "the", "vocabulary", "to", "perform", "well", ",", "and", "-", "as", "we", "show", "in", "this", "work", "-", "result", "in", "unstable", ",", "and", "hence", "less", "reliable", ",", "results", ".", "we", "propose", "an", "alternative", "approach", "that", "does", "not", "use", "vector", "space", "alignment", ",", "and", "instead", "considers", "the", "neighbors", "of", "each", "word", ".", "the", "method", "is", "simple", ",", "interpretable", "and", "stable", ".", "we", "demonstrate", "its", "effectiveness", "in", "9", "different", "setups", ",", "considering", "different", "corpus", "splitting", "criteria", "(", "age", ",", "gender", "and", "profession", "of", "tweet", "authors", ",", "time", "of", "tweet", ")", "and", "different", "languages", "(", "english", ",", "french", "and", "hebrew", ")", "."]}, {"venue": "ACL", "title": "Estimating Mutual Information Between Dense Word Embeddings", "abstract": "Word embedding-based similarity measures are 
currently among the top-performing methods on unsupervised semantic textual similarity (STS) tasks. Recent work has increasingly adopted a statistical view on these embeddings, with some of the top approaches being essentially various correlations (which include the famous cosine similarity). Another excellent candidate for a similarity measure is mutual information (MI), which can capture arbitrary dependencies between the variables and has a simple and intuitive expression. Unfortunately, its use in the context of dense word embeddings has so far been avoided due to difficulties with estimating MI for continuous data. In this work we go through a vast literature on estimating MI in such cases and single out the most promising methods, yielding a simple and elegant similarity measure for word embeddings. We show that mutual information is a viable alternative to correlations, gives an excellent signal that correlates well with human judgements of similarity and rivals existing state-of-the-art unsupervised methods.", "doc_id": "620d132c9aa19d58a4aa47369c9905a4", "publication_year": 2020, "sentences": ["word embedding - based similarity measures are currently among the top - performing methods on unsupervised semantic textual similarity ( sts ) tasks .", "recent work has increasingly adopted a statistical view on these embeddings , with some of the top approaches being essentially various correlations ( which include the famous cosine similarity ) .", "another excellent candidate for a similarity measure is mutual information ( mi ) , which can capture arbitrary dependencies between the variables and has a simple and intuitive expression .", "unfortunately , its use in the context of dense word embeddings has so far been avoided due to difficulties with estimating mi for continuous data .", "in this work we go through a vast literature on estimating mi in such cases and single out the most promising methods , yielding a simple and elegant similarity measure for 
word embeddings .", "we show that mutual information is a viable alternative to correlations , gives an excellent signal that correlates well with human judgements of similarity and rivals existing state - of - the - art unsupervised methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "similarity measures", "nugget_type": "APP", "argument_type": "Target", "tokens": ["similarity", "measures"], "offsets": [4, 5]}], "trigger": {"text": "methods", "tokens": ["methods"], "offsets": [13]}}, {"event_type": "RWF", "arguments": [{"text": "continuous data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["continuous", "data"], "offsets": [108, 109]}, {"text": "difficulties", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficulties"], "offsets": [103]}, {"text": "mutual information", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["mutual", "information"], "offsets": [63, 64]}], "trigger": {"text": "estimating", "tokens": ["estimating"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "vast literature", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["vast", "literature"], "offsets": [118, 119]}], "trigger": {"text": "go through", "tokens": ["go", "through"], "offsets": [115, 116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "most promising methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["most", "promising", "methods"], "offsets": [130, 131, 132]}], "trigger": {"text": "single out", "tokens": ["single", "out"], "offsets": [127, 128]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [114]}, {"text": "word embeddings", "nugget_type": "APP", "argument_type": "Target", "tokens": 
["word", "embeddings"], "offsets": [142, 143]}, {"text": "simple and elegant similarity measure", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["simple", "and", "elegant", "similarity", "measure"], "offsets": [136, 137, 138, 139, 140]}], "trigger": {"text": "yielding", "tokens": ["yielding"], "offsets": [134]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [145]}, {"text": "viable alternative", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["viable", "alternative"], "offsets": [152, 153]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [146]}}, {"event_type": "FAC", "arguments": [{"text": "mutual information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["mutual", "information"], "offsets": [148, 149]}, {"text": "correlations", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["correlations"], "offsets": [155]}], "trigger": {"text": "viable alternative", "tokens": ["viable", "alternative"], "offsets": [152, 153]}}, {"event_type": "FAC", "arguments": [{"text": "mutual information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["mutual", "information"], "offsets": [148, 149]}, {"text": "excellent signal", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["excellent", "signal"], "offsets": [159, 160]}], "trigger": {"text": "gives", "tokens": ["gives"], "offsets": [157]}}, {"event_type": "CMP", "arguments": [{"text": "mutual information", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["mutual", "information"], "offsets": [148, 149]}, {"text": "existing state - of - the - art unsupervised methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "methods"], "offsets": [171, 172, 173, 174, 175, 176, 177, 178, 179, 180]}], "trigger": {"text": "rivals", "tokens": ["rivals"], "offsets": [170]}}], "document": 
["word", "embedding", "-", "based", "similarity", "measures", "are", "currently", "among", "the", "top", "-", "performing", "methods", "on", "unsupervised", "semantic", "textual", "similarity", "(", "sts", ")", "tasks", ".", "recent", "work", "has", "increasingly", "adopted", "a", "statistical", "view", "on", "these", "embeddings", ",", "with", "some", "of", "the", "top", "approaches", "being", "essentially", "various", "correlations", "(", "which", "include", "the", "famous", "cosine", "similarity", ")", ".", "another", "excellent", "candidate", "for", "a", "similarity", "measure", "is", "mutual", "information", "(", "mi", ")", ",", "which", "can", "capture", "arbitrary", "dependencies", "between", "the", "variables", "and", "has", "a", "simple", "and", "intuitive", "expression", ".", "unfortunately", ",", "its", "use", "in", "the", "context", "of", "dense", "word", "embeddings", "has", "so", "far", "been", "avoided", "due", "to", "difficulties", "with", "estimating", "mi", "for", "continuous", "data", ".", "in", "this", "work", "we", "go", "through", "a", "vast", "literature", "on", "estimating", "mi", "in", "such", "cases", "and", "single", "out", "the", "most", "promising", "methods", ",", "yielding", "a", "simple", "and", "elegant", "similarity", "measure", "for", "word", "embeddings", ".", "we", "show", "that", "mutual", "information", "is", "a", "viable", "alternative", "to", "correlations", ",", "gives", "an", "excellent", "signal", "that", "correlates", "well", "with", "human", "judgements", "of", "similarity", "and", "rivals", "existing", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "methods", "."]}, {"venue": "ACL", "title": "schuBERT: Optimizing Elements of BERT", "abstract": "Transformers have gradually become a key component for many state-of-the-art natural language representation models. 
A recent Transformer based model- BERTachieved state-of-the-art results on various natural language processing tasks, including GLUE, SQuAD v1.1, and SQuAD v2.0. This model however is computationally prohibitive and has a huge number of parameters. In this work we revisit the architecture choices of BERT in efforts to obtain a lighter model. We focus on reducing the number of parameters yet our methods can be applied towards other objectives such FLOPs or latency. We show that much efficient light BERT models can be obtained by reducing algorithmically chosen correct architecture design dimensions rather than reducing the number of Transformer encoder layers. In particular, our schuBERT gives 6.6% higher average accuracy on GLUE and SQuAD datasets as compared to BERT with three encoder layers while having the same number of parameters.", "doc_id": "a320de069149e7f1035037a72db0c9a5", "publication_year": 2020, "sentences": ["transformers have gradually become a key component for many state - of - the - art natural language representation models .", "a recent transformer based model - bertachieved state - of - the - art results on various natural language processing tasks , including glue , squad v1 . 1 , and squad v2 . 0 .", "this model however is computationally prohibitive and has a huge number of parameters .", "in this work we revisit the architecture choices of bert in efforts to obtain a lighter model .", "we focus on reducing the number of parameters yet our methods can be applied towards other objectives such flops or latency .", "we show that much efficient light bert models can be obtained by reducing algorithmically chosen correct architecture design dimensions rather than reducing the number of transformer encoder layers .", "in particular , our schubert gives 6 . 
6 % higher average accuracy on glue and squad datasets as compared to bert with three encoder layers while having the same number of parameters ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language representation models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["natural", "language", "representation", "models"], "offsets": [16, 17, 18, 19]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "model - bertachieved", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["model", "-", "bertachieved"], "offsets": [25, 26, 27]}, {"text": "huge number of parameters", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["huge", "number", "of", "parameters"], "offsets": [66, 67, 68, 69]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "architecture choices of bert", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["architecture", "choices", "of", "bert"], "offsets": [77, 78, 79, 80]}, {"text": "obtain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtain"], "offsets": [84]}], "trigger": {"text": "revisit", "tokens": ["revisit"], "offsets": [75]}}, {"event_type": "PUR", "arguments": [{"text": "lighter model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["lighter", "model"], "offsets": [86, 87]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "number of parameters", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["number", "of", "parameters"], "offsets": [94, 95, 96]}], "trigger": {"text": "reducing", "tokens": ["reducing"], "offsets": [92]}}, {"event_type": "FAC", 
"arguments": [{"text": "flops or latency", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["flops", "or", "latency"], "offsets": [107, 108, 109]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [102]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [111]}, {"text": "reducing", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["reducing"], "offsets": [123]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [112]}}, {"event_type": "CMP", "arguments": [{"text": "algorithmically chosen correct architecture design dimensions", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["algorithmically", "chosen", "correct", "architecture", "design", "dimensions"], "offsets": [124, 125, 126, 127, 128, 129]}, {"text": "number of transformer encoder layers", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["number", "of", "transformer", "encoder", "layers"], "offsets": [134, 135, 136, 137, 138]}, {"text": "much efficient light bert models", "nugget_type": "STR", "argument_type": "Result", "tokens": ["much", "efficient", "light", "bert", "models"], "offsets": [114, 115, 116, 117, 118]}], "trigger": {"text": "reducing", "tokens": ["reducing"], "offsets": [123]}}, {"event_type": "CMP", "arguments": [{"text": "6 . 
6 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["6", ".", "6", "%"], "offsets": [146, 147, 148, 149]}, {"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [150]}, {"text": "average accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["average", "accuracy"], "offsets": [151, 152]}, {"text": "glue and squad datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["glue", "and", "squad", "datasets"], "offsets": [154, 155, 156, 157]}, {"text": "bert with three encoder layers", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bert", "with", "three", "encoder", "layers"], "offsets": [161, 162, 163, 164, 165]}], "trigger": {"text": "higher", "tokens": ["higher"], "offsets": [150]}}], "document": ["transformers", "have", "gradually", "become", "a", "key", "component", "for", "many", "state", "-", "of", "-", "the", "-", "art", "natural", "language", "representation", "models", ".", "a", "recent", "transformer", "based", "model", "-", "bertachieved", "state", "-", "of", "-", "the", "-", "art", "results", "on", "various", "natural", "language", "processing", "tasks", ",", "including", "glue", ",", "squad", "v1", ".", "1", ",", "and", "squad", "v2", ".", "0", ".", "this", "model", "however", "is", "computationally", "prohibitive", "and", "has", "a", "huge", "number", "of", "parameters", ".", "in", "this", "work", "we", "revisit", "the", "architecture", "choices", "of", "bert", "in", "efforts", "to", "obtain", "a", "lighter", "model", ".", "we", "focus", "on", "reducing", "the", "number", "of", "parameters", "yet", "our", "methods", "can", "be", "applied", "towards", "other", "objectives", "such", "flops", "or", "latency", ".", "we", "show", "that", "much", "efficient", "light", "bert", "models", "can", "be", "obtained", "by", "reducing", "algorithmically", "chosen", "correct", "architecture", "design", "dimensions", "rather", "than", "reducing", "the", "number", "of", 
"transformer", "encoder", "layers", ".", "in", "particular", ",", "our", "schubert", "gives", "6", ".", "6", "%", "higher", "average", "accuracy", "on", "glue", "and", "squad", "datasets", "as", "compared", "to", "bert", "with", "three", "encoder", "layers", "while", "having", "the", "same", "number", "of", "parameters", "."]}, {"venue": "ACL", "title": "SUPERT: Towards New Frontiers in Unsupervised Evaluation Metrics for Multi-Document Summarization", "abstract": "We study unsupervised multi-document summarization evaluation metrics, which require neither human-written reference summaries nor human annotations (e.g. preferences, ratings, etc.). We propose SUPERT, which rates the quality of a summary by measuring its semantic similarity with a pseudo reference summary, i.e. selected salient sentences from the source documents, using contextualized embeddings and soft token alignment techniques. Compared to the state-of-the-art unsupervised evaluation metrics, SUPERT correlates better with human ratings by 18- 39%. Furthermore, we use SUPERT as rewards to guide a neural-based reinforcement learning summarizer, yielding favorable performance compared to the state-of-the-art unsupervised summarizers. All source code is available at https://github.com/yg211/acl20-ref-free-eval.", "doc_id": "737734f9a7e4cea0aa8497220c018f00", "publication_year": 2020, "sentences": ["we study unsupervised multi - document summarization evaluation metrics , which require neither human - written reference summaries nor human annotations ( e . g . preferences , ratings , etc . ) .", "we propose supert , which rates the quality of a summary by measuring its semantic similarity with a pseudo reference summary , i . e . 
selected salient sentences from the source documents , using contextualized embeddings and soft token alignment techniques .", "compared to the state - of - the - art unsupervised evaluation metrics , supert correlates better with human ratings by 18 - 39 % .", "furthermore , we use supert as rewards to guide a neural - based reinforcement learning summarizer , yielding favorable performance compared to the state - of - the - art unsupervised summarizers .", "all source code is available at https : / / github . com / yg211 / acl20 - ref - free - eval ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "unsupervised multi - document summarization evaluation metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["unsupervised", "multi", "-", "document", "summarization", "evaluation", "metrics"], "offsets": [2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [34]}, {"text": "supert", "nugget_type": "APP", "argument_type": "Content", "tokens": ["supert"], "offsets": [36]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [35]}}, {"event_type": "MDS", "arguments": [{"text": "rates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["rates"], "offsets": [39]}, {"text": "semantic similarity", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantic", "similarity"], "offsets": [48, 49]}], "trigger": {"text": "measuring", "tokens": ["measuring"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "quality of a summary", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["quality", "of", "a", "summary"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "rates", "tokens": ["rates"], "offsets": [39]}}, 
{"event_type": "MDS", "arguments": [{"text": "salient sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["salient", "sentences"], "offsets": [61, 62]}, {"text": "contextualized embeddings", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["contextualized", "embeddings"], "offsets": [69, 70]}, {"text": "soft token alignment techniques", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["soft", "token", "alignment", "techniques"], "offsets": [72, 73, 74, 75]}], "trigger": {"text": "selected", "tokens": ["selected"], "offsets": [60]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art unsupervised evaluation metrics", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "unsupervised", "evaluation", "metrics"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87, 88, 89]}, {"text": "supert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["supert"], "offsets": [91]}, {"text": "human ratings", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["human", "ratings"], "offsets": [95, 96]}, {"text": "18 - 39 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["18", "-", "39", "%"], "offsets": [98, 99, 100, 101]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [93]}], "trigger": {"text": "correlates", "tokens": ["correlates"], "offsets": [92]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art unsupervised summarizers", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "unsupervised", "summarizers"], "offsets": [126, 127, 128, 129, 130, 131, 132, 133, 134]}, {"text": "neural - based reinforcement learning summarizer", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["neural", "-", "based", "reinforcement", "learning", "summarizer"], "offsets": [113, 114, 115, 116, 117, 118]}, {"text": 
"performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [122]}, {"text": "favorable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["favorable"], "offsets": [121]}], "trigger": {"text": "yielding", "tokens": ["yielding"], "offsets": [120]}}], "document": ["we", "study", "unsupervised", "multi", "-", "document", "summarization", "evaluation", "metrics", ",", "which", "require", "neither", "human", "-", "written", "reference", "summaries", "nor", "human", "annotations", "(", "e", ".", "g", ".", "preferences", ",", "ratings", ",", "etc", ".", ")", ".", "we", "propose", "supert", ",", "which", "rates", "the", "quality", "of", "a", "summary", "by", "measuring", "its", "semantic", "similarity", "with", "a", "pseudo", "reference", "summary", ",", "i", ".", "e", ".", "selected", "salient", "sentences", "from", "the", "source", "documents", ",", "using", "contextualized", "embeddings", "and", "soft", "token", "alignment", "techniques", ".", "compared", "to", "the", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "evaluation", "metrics", ",", "supert", "correlates", "better", "with", "human", "ratings", "by", "18", "-", "39", "%", ".", "furthermore", ",", "we", "use", "supert", "as", "rewards", "to", "guide", "a", "neural", "-", "based", "reinforcement", "learning", "summarizer", ",", "yielding", "favorable", "performance", "compared", "to", "the", "state", "-", "of", "-", "the", "-", "art", "unsupervised", "summarizers", ".", "all", "source", "code", "is", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "yg211", "/", "acl20", "-", "ref", "-", "free", "-", "eval", "."]}, {"venue": "ACL", "title": "More Diverse Dialogue Datasets via Diversity-Informed Data Collection", "abstract": "Automated generation of conversational dialogue using modern neural architectures has made notable advances. 
However, these models are known to have a drawback of often producing uninteresting, predictable responses; this is known as the diversity problem. We introduce a new strategy to address this problem, called Diversity-Informed Data Collection. Unlike prior approaches, which modify model architectures to solve the problem, this method uses dynamically computed corpus-level statistics to determine which conversational participants to collect data from. Diversity-Informed Data Collection produces significantly more diverse data than baseline data collection methods, and better results on two downstream tasks: emotion classification and dialogue generation. This method is generalizable and can be used with other corpus-level metrics.", "doc_id": "84bbfe534a23b51a6ea28e930e1046f8", "publication_year": 2020, "sentences": ["automated generation of conversational dialogue using modern neural architectures has made notable advances .", "however , these models are known to have a drawback of often producing uninteresting , predictable responses ; this is known as the diversity problem .", "we introduce a new strategy to address this problem , called diversity - informed data collection .", "unlike prior approaches , which modify model architectures to solve the problem , this method uses dynamically computed corpus - level statistics to determine which conversational participants to collect data from .", "diversity - informed data collection produces significantly more diverse data than baseline data collection methods , and better results on two downstream tasks : emotion classification and dialogue generation .", "this method is generalizable and can be used with other corpus - level metrics ."], "events": [{"event_type": "ITT", "arguments": [{"text": "modern neural architectures", "nugget_type": "APP", "argument_type": "Target", "tokens": ["modern", "neural", "architectures"], "offsets": [6, 7, 8]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [5]}}, 
{"event_type": "RWF", "arguments": [{"text": "modern neural architectures", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["modern", "neural", "architectures"], "offsets": [6, 7, 8]}, {"text": "uninteresting , predictable responses", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["uninteresting", ",", "predictable", "responses"], "offsets": [27, 28, 29, 30]}], "trigger": {"text": "often producing", "tokens": ["often", "producing"], "offsets": [25, 26]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [40]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [46]}, {"text": "diversity - informed data collection", "nugget_type": "APP", "argument_type": "Content", "tokens": ["diversity", "-", "informed", "data", "collection"], "offsets": [51, 52, 53, 54, 55]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "often producing uninteresting , predictable responses", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["often", "producing", "uninteresting", ",", "predictable", "responses"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [46]}}, {"event_type": "WKS", "arguments": [{"text": "determine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["determine"], "offsets": [80]}, {"text": "dynamically computed corpus - level statistics", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["dynamically", "computed", "corpus", "-", "level", "statistics"], "offsets": [73, 74, 75, 76, 77, 78]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [72]}}, {"event_type": "PUR", "arguments": [{"text": "which conversational participants to collect data from", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["which", "conversational", "participants", 
"to", "collect", "data", "from"], "offsets": [81, 82, 83, 84, 85, 86, 87]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [80]}}, {"event_type": "CMP", "arguments": [{"text": "diversity - informed data collection", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["diversity", "-", "informed", "data", "collection"], "offsets": [89, 90, 91, 92, 93]}, {"text": "significantly more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "more"], "offsets": [95, 96]}, {"text": "diverse data", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["diverse", "data"], "offsets": [97, 98]}, {"text": "baseline data collection methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "data", "collection", "methods"], "offsets": [100, 101, 102, 103]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [106]}, {"text": "results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["results"], "offsets": [107]}, {"text": "on two downstream tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "two", "downstream", "tasks"], "offsets": [108, 109, 110, 111]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [94]}}], "document": ["automated", "generation", "of", "conversational", "dialogue", "using", "modern", "neural", "architectures", "has", "made", "notable", "advances", ".", "however", ",", "these", "models", "are", "known", "to", "have", "a", "drawback", "of", "often", "producing", "uninteresting", ",", "predictable", "responses", ";", "this", "is", "known", "as", "the", "diversity", "problem", ".", "we", "introduce", "a", "new", "strategy", "to", "address", "this", "problem", ",", "called", "diversity", "-", "informed", "data", "collection", ".", "unlike", "prior", "approaches", ",", "which", "modify", "model", "architectures", "to", "solve", "the", "problem", ",", "this", "method", "uses", 
"dynamically", "computed", "corpus", "-", "level", "statistics", "to", "determine", "which", "conversational", "participants", "to", "collect", "data", "from", ".", "diversity", "-", "informed", "data", "collection", "produces", "significantly", "more", "diverse", "data", "than", "baseline", "data", "collection", "methods", ",", "and", "better", "results", "on", "two", "downstream", "tasks", ":", "emotion", "classification", "and", "dialogue", "generation", ".", "this", "method", "is", "generalizable", "and", "can", "be", "used", "with", "other", "corpus", "-", "level", "metrics", "."]}, {"venue": "ACL", "title": "One Time of Interaction May Not Be Enough: Go Deep with an Interaction-over-Interaction Network for Response Selection in Dialogues", "abstract": "Currently, researchers have paid great attention to retrieval-based dialogues in open-domain. In particular, people study the problem by investigating context-response matching for multi-turn response selection based on publicly recognized benchmark data sets. State-of-the-art methods require a response to interact with each utterance in a context from the beginning, but the interaction is performed in a shallow way. In this work, we let utterance-response interaction go deep by proposing an interaction-over-interaction network (IoI). The model performs matching by stacking multiple interaction blocks in which residual information from one time of interaction initiates the interaction process again. Thus, matching information within an utterance-response pair is extracted from the interaction of the pair in an iterative fashion, and the information flows along the chain of the blocks via representations. Evaluation results on three benchmark data sets indicate that IoI can significantly outperform state-of-the-art methods in terms of various matching metrics. 
Through further analysis, we also unveil how the depth of interaction affects the performance of IoI.", "doc_id": "19202df977906dde2003e9931c2be1c3", "publication_year": 2019, "sentences": ["currently , researchers have paid great attention to retrieval - based dialogues in open - domain .", "in particular , people study the problem by investigating context - response matching for multi - turn response selection based on publicly recognized benchmark data sets .", "state - of - the - art methods require a response to interact with each utterance in a context from the beginning , but the interaction is performed in a shallow way .", "in this work , we let utterance - response interaction go deep by proposing an interaction - over - interaction network ( ioi ) .", "the model performs matching by stacking multiple interaction blocks in which residual information from one time of interaction initiates the interaction process again .", "thus , matching information within an utterance - response pair is extracted from the interaction of the pair in an iterative fashion , and the information flows along the chain of the blocks via representations .", "evaluation results on three benchmark data sets indicate that ioi can significantly outperform state - of - the - art methods in terms of various matching metrics .", "through further analysis , we also unveil how the depth of interaction affects the performance of ioi ."], "events": [{"event_type": "ITT", "arguments": [{"text": "retrieval - based dialogues", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["retrieval", "-", "based", "dialogues"], "offsets": [8, 9, 10, 11]}], "trigger": {"text": "paid", "tokens": ["paid"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "interaction", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["interaction"], "offsets": [69]}, {"text": "shallow way", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["shallow", "way"], "offsets": [74, 
75]}], "trigger": {"text": "performed", "tokens": ["performed"], "offsets": [71]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [81]}, {"text": "interaction - over - interaction network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["interaction", "-", "over", "-", "interaction", "network"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "go deep", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["go", "deep"], "offsets": [87, 88]}], "trigger": {"text": "proposing", "tokens": ["proposing"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "utterance - response interaction", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["utterance", "-", "response", "interaction"], "offsets": [83, 84, 85, 86]}], "trigger": {"text": "go deep", "tokens": ["go", "deep"], "offsets": [87, 88]}}, {"event_type": "MDS", "arguments": [{"text": "multiple interaction blocks", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiple", "interaction", "blocks"], "offsets": [108, 109, 110]}, {"text": "performs", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["performs"], "offsets": [104]}], "trigger": {"text": "stacking", "tokens": ["stacking"], "offsets": [107]}}, {"event_type": "PUR", "arguments": [{"text": "matching", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["matching"], "offsets": [105]}], "trigger": {"text": "performs", "tokens": ["performs"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "matching information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["matching", "information"], "offsets": [128, 129]}, {"text": "interaction of the pair", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["interaction", "of", "the", "utterance", "-", "response", "pair"], "offsets": [140, 141, 142, 132, 133, 134, 135]}], "trigger": {"text": "extracted", 
"tokens": ["extracted"], "offsets": [137]}}, {"event_type": "MDS", "arguments": [{"text": "matching information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["matching", "information"], "offsets": [128, 129]}, {"text": "representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["representations"], "offsets": [160]}, {"text": "chain of the blocks", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["chain", "of", "the", "blocks"], "offsets": [155, 156, 157, 158]}], "trigger": {"text": "flows", "tokens": ["flows"], "offsets": [152]}}, {"event_type": "CMP", "arguments": [{"text": "three benchmark data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "benchmark", "data", "sets"], "offsets": [165, 166, 167, 168]}, {"text": "interaction - over - interaction network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["interaction", "-", "over", "-", "interaction", "network"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [174]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [175, 176, 177, 178, 179, 180, 181, 182]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [174]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [194]}, {"text": "how the depth of interaction affects the performance of ioi", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["how", "the", "depth", "of", "interaction", "affects", "the", "performance", "of", "interaction", "-", "over", "-", "interaction", "network"], "offsets": [197, 198, 199, 200, 201, 202, 203, 204, 205, 92, 93, 94, 95, 96, 97]}], "trigger": {"text": "unveil", "tokens": ["unveil"], "offsets": 
[196]}}], "document": ["currently", ",", "researchers", "have", "paid", "great", "attention", "to", "retrieval", "-", "based", "dialogues", "in", "open", "-", "domain", ".", "in", "particular", ",", "people", "study", "the", "problem", "by", "investigating", "context", "-", "response", "matching", "for", "multi", "-", "turn", "response", "selection", "based", "on", "publicly", "recognized", "benchmark", "data", "sets", ".", "state", "-", "of", "-", "the", "-", "art", "methods", "require", "a", "response", "to", "interact", "with", "each", "utterance", "in", "a", "context", "from", "the", "beginning", ",", "but", "the", "interaction", "is", "performed", "in", "a", "shallow", "way", ".", "in", "this", "work", ",", "we", "let", "utterance", "-", "response", "interaction", "go", "deep", "by", "proposing", "an", "interaction", "-", "over", "-", "interaction", "network", "(", "ioi", ")", ".", "the", "model", "performs", "matching", "by", "stacking", "multiple", "interaction", "blocks", "in", "which", "residual", "information", "from", "one", "time", "of", "interaction", "initiates", "the", "interaction", "process", "again", ".", "thus", ",", "matching", "information", "within", "an", "utterance", "-", "response", "pair", "is", "extracted", "from", "the", "interaction", "of", "the", "pair", "in", "an", "iterative", "fashion", ",", "and", "the", "information", "flows", "along", "the", "chain", "of", "the", "blocks", "via", "representations", ".", "evaluation", "results", "on", "three", "benchmark", "data", "sets", "indicate", "that", "ioi", "can", "significantly", "outperform", "state", "-", "of", "-", "the", "-", "art", "methods", "in", "terms", "of", "various", "matching", "metrics", ".", "through", "further", "analysis", ",", "we", "also", "unveil", "how", "the", "depth", "of", "interaction", "affects", "the", "performance", "of", "ioi", "."]}, {"venue": "ACL", "title": "Don\u2019t Rule Out Monolingual Speakers: A Method For Crowdsourcing Machine Translation Data", 
"abstract": "High-performing machine translation (MT) systems can help overcome language barriers while making it possible for everyone to communicate and use language technologies in the language of their choice. However, such systems require large amounts of parallel sentences for training, and translators can be difficult to find and expensive. Here, we present a data collection strategy for MT which, in contrast, is cheap and simple, as it does not require bilingual speakers. Based on the insight that humans pay specific attention to movements, we use graphics interchange formats (GIFs) as a pivot to collect parallel sentences from monolingual annotators. We use our strategy to collect data in Hindi, Tamil and English. As a baseline, we also collect data using images as a pivot. We perform an intrinsic evaluation by manually evaluating a subset of the sentence pairs and an extrinsic evaluation by finetuning mBART (Liu et al., 2020) on the collected data. We find that sentences collected via GIFs are indeed of higher quality.", "doc_id": "23e8a20ae1c1779985ecb0339f46e32e", "publication_year": 2021, "sentences": ["high - performing machine translation ( mt ) systems can help overcome language barriers while making it possible for everyone to communicate and use language technologies in the language of their choice .", "however , such systems require large amounts of parallel sentences for training , and translators can be difficult to find and expensive .", "here , we present a data collection strategy for mt which , in contrast , is cheap and simple , as it does not require bilingual speakers .", "based on the insight that humans pay specific attention to movements , we use graphics interchange formats ( gifs ) as a pivot to collect parallel sentences from monolingual annotators .", "we use our strategy to collect data in hindi , tamil and english .", "as a baseline , we also collect data using images as a pivot .", "we perform an intrinsic evaluation by 
manually evaluating a subset of the sentence pairs and an extrinsic evaluation by finetuning mbart ( liu et al . , 2020 ) on the collected data .", "we find that sentences collected via gifs are indeed of higher quality ."], "events": [{"event_type": "ITT", "arguments": [{"text": "machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "translation"], "offsets": [3, 4]}], "trigger": {"text": "overcome", "tokens": ["overcome"], "offsets": [11]}}, {"event_type": "RWF", "arguments": [{"text": "translators can be difficult to find", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["translators", "can", "be", "difficult", "to", "find"], "offsets": [47, 48, 49, 50, 51, 52]}, {"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [54]}, {"text": "large amounts of parallel sentences for training", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["large", "amounts", "of", "parallel", "sentences", "for", "training"], "offsets": [38, 39, 40, 41, 42, 43, 44]}, {"text": "machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["machine", "translation"], "offsets": [3, 4]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [37]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "data collection strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "collection", "strategy"], "offsets": [61, 62, 63]}, {"text": "machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", "translation"], "offsets": [3, 4]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "parallel sentences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["parallel", "sentences"], "offsets": [109, 110]}, {"text": "graphics 
interchange formats", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["graphics", "interchange", "formats"], "offsets": [98, 99, 100]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [108]}}, {"event_type": "MDS", "arguments": [{"text": "data collection strategy", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["data", "collection", "strategy"], "offsets": [61, 62, 63]}, {"text": "collect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["collect"], "offsets": [120]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [116]}}, {"event_type": "PUR", "arguments": [{"text": "data in hindi , tamil and english", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["data", "in", "hindi", ",", "tamil", "and", "english"], "offsets": [121, 122, 123, 124, 125, 126, 127]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [120]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [133]}, {"text": "data", "nugget_type": "DST", "argument_type": "Content", "tokens": ["data"], "offsets": [136]}, {"text": "baseline", "nugget_type": "APP", "argument_type": "Target", "tokens": ["baseline"], "offsets": [131]}], "trigger": {"text": "collect", "tokens": ["collect"], "offsets": [135]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [143]}, {"text": "extrinsic evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extrinsic", "evaluation"], "offsets": [159, 160]}, {"text": "intrinsic evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["intrinsic", "evaluation"], "offsets": [146, 147]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [144]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": 
[177]}, {"text": "collected", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["collected"], "offsets": [181]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [178]}}, {"event_type": "CMP", "arguments": [{"text": "higher quality", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher", "quality"], "offsets": [187, 188]}, {"text": "sentences", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["sentences"], "offsets": [180]}, {"text": "via gifs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "gifs"], "offsets": [182, 183]}, {"text": "indeed", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["indeed"], "offsets": [185]}], "trigger": {"text": "collected", "tokens": ["collected"], "offsets": [181]}}], "document": ["high", "-", "performing", "machine", "translation", "(", "mt", ")", "systems", "can", "help", "overcome", "language", "barriers", "while", "making", "it", "possible", "for", "everyone", "to", "communicate", "and", "use", "language", "technologies", "in", "the", "language", "of", "their", "choice", ".", "however", ",", "such", "systems", "require", "large", "amounts", "of", "parallel", "sentences", "for", "training", ",", "and", "translators", "can", "be", "difficult", "to", "find", "and", "expensive", ".", "here", ",", "we", "present", "a", "data", "collection", "strategy", "for", "mt", "which", ",", "in", "contrast", ",", "is", "cheap", "and", "simple", ",", "as", "it", "does", "not", "require", "bilingual", "speakers", ".", "based", "on", "the", "insight", "that", "humans", "pay", "specific", "attention", "to", "movements", ",", "we", "use", "graphics", "interchange", "formats", "(", "gifs", ")", "as", "a", "pivot", "to", "collect", "parallel", "sentences", "from", "monolingual", "annotators", ".", "we", "use", "our", "strategy", "to", "collect", "data", "in", "hindi", ",", "tamil", "and", "english", ".", "as", "a", "baseline", ",", "we", "also", "collect", "data", "using", 
"images", "as", "a", "pivot", ".", "we", "perform", "an", "intrinsic", "evaluation", "by", "manually", "evaluating", "a", "subset", "of", "the", "sentence", "pairs", "and", "an", "extrinsic", "evaluation", "by", "finetuning", "mbart", "(", "liu", "et", "al", ".", ",", "2020", ")", "on", "the", "collected", "data", ".", "we", "find", "that", "sentences", "collected", "via", "gifs", "are", "indeed", "of", "higher", "quality", "."]}, {"venue": "ACL", "title": "Multilingual Document-Level Translation Enables Zero-Shot Transfer From Sentences to Documents", "abstract": "Document-level neural machine translation (DocNMT) achieves coherent translations by incorporating cross-sentence context. However, for most language pairs there\u2019s a shortage of parallel documents, although parallel sentences are readily available. In this paper, we study whether and how contextual modeling in DocNMT is transferable via multilingual modeling. We focus on the scenario of zero-shot transfer from teacher languages with document level data to student languages with no documents but sentence level data, and for the first time treat document-level translation as a transfer learning problem. Using simple concatenation-based DocNMT, we explore the effect of 3 factors on the transfer: the number of teacher languages with document level data, the balance between document and sentence level data at training, and the data condition of parallel documents (genuine vs. back-translated). Our experiments on Europarl-7 and IWSLT-10 show the feasibility of multilingual transfer for DocNMT, particularly on document-specific metrics. We observe that more teacher languages and adequate data balance both contribute to better transfer quality. 
Surprisingly, the transfer is less sensitive to the data condition, where multilingual DocNMT delivers decent performance with either back-translated or genuine document pairs.", "doc_id": "435e95bed53d524f51f36664a13a7866", "publication_year": 2022, "sentences": ["document - level neural machine translation ( docnmt ) achieves coherent translations by incorporating cross - sentence context .", "however , for most language pairs there \u2019 s a shortage of parallel documents , although parallel sentences are readily available .", "in this paper , we study whether and how contextual modeling in docnmt is transferable via multilingual modeling .", "we focus on the scenario of zero - shot transfer from teacher languages with document level data to student languages with no documents but sentence level data , and for the first time treat document - level translation as a transfer learning problem .", "using simple concatenation - based docnmt , we explore the effect of 3 factors on the transfer : the number of teacher languages with document level data , the balance between document and sentence level data at training , and the data condition of parallel documents ( genuine vs . 
back - translated ) .", "our experiments on europarl - 7 and iwslt - 10 show the feasibility of multilingual transfer for docnmt , particularly on document - specific metrics .", "we observe that more teacher languages and adequate data balance both contribute to better transfer quality .", "surprisingly , the transfer is less sensitive to the data condition , where multilingual docnmt delivers decent performance with either back - translated or genuine document pairs ."], "events": [{"event_type": "ITT", "arguments": [{"text": "document - level neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "neural", "machine", "translation"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "language pairs", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["language", "pairs"], "offsets": [23, 24]}], "trigger": {"text": "shortage of parallel documents", "tokens": ["shortage", "of", "parallel", "documents"], "offsets": [29, 30, 31, 32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [45]}, {"text": "contextual modeling in docnmt", "nugget_type": "APP", "argument_type": "Content", "tokens": ["contextual", "modeling", "in", "document", "-", "level", "neural", "machine", "translation"], "offsets": [50, 51, 52, 0, 1, 2, 3, 4, 5]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [46]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "scenario of zero - shot transfer", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["scenario", "of", "zero", "-", "shot", "transfer"], "offsets": [64, 65, 66, 67, 68, 69]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [61]}}, {"event_type": "WKS", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "document - level translation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["document", "-", "level", "translation"], "offsets": [94, 95, 96, 97]}, {"text": "transfer learning problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transfer", "learning", "problem"], "offsets": [100, 101, 102]}], "trigger": {"text": "treat", "tokens": ["treat"], "offsets": [93]}}, {"event_type": "MDS", "arguments": [{"text": "simple concatenation - based docnmt", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["simple", "concatenation", "-", "based", "document", "-", "level", "neural", "machine", "translation"], "offsets": [105, 106, 107, 108, 0, 1, 2, 3, 4, 5]}, {"text": "explore", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["explore"], "offsets": [112]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [104]}}, {"event_type": "PUR", "arguments": [{"text": "effect of 3 factors on the transfer", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["effect", "of", "3", "factors", "on", "the", "transfer"], "offsets": [114, 115, 116, 117, 118, 119, 120]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "feasibility of multilingual transfer for docnmt", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["feasibility", "of", "multilingual", "transfer", "for", "document", "-", "level", "neural", "machine", "translation"], "offsets": [171, 172, 173, 174, 175, 0, 1, 2, 3, 4, 5]}, {"text": "on europarl - 7 and iwslt - 10", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "europarl", "-", "7", "and", "iwslt", "-", "10"], "offsets": [161, 162, 163, 164, 165, 166, 167, 168]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [169]}}, {"event_type": "FIN", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [185]}, {"text": "contribute", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["contribute"], "offsets": [196]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [186]}}, {"event_type": "FAC", "arguments": [{"text": "better transfer quality", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "transfer", "quality"], "offsets": [198, 199, 200]}, {"text": "more teacher languages", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["more", "teacher", "languages"], "offsets": [188, 189, 190]}, {"text": "adequate data balance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["adequate", "data", "balance"], "offsets": [192, 193, 194]}], "trigger": {"text": "contribute", "tokens": ["contribute"], "offsets": [196]}}, {"event_type": "FAC", "arguments": [{"text": "transfer", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["transfer"], "offsets": [205]}, {"text": "data condition", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["data", "condition"], "offsets": [211, 212]}], "trigger": {"text": "less sensitive", "tokens": ["less", "sensitive"], "offsets": [207, 208]}}, {"event_type": "FAC", "arguments": [{"text": "multilingual docnmt", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multilingual", "document", "-", "level", "neural", "machine", "translation"], "offsets": [215, 0, 1, 2, 3, 4, 5]}, {"text": "decent performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["decent", "performance"], "offsets": [218, 219]}, {"text": "with either back - translated or genuine document pairs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "either", "back", "-", "translated", "or", "genuine", "document", "pairs"], "offsets": [220, 221, 222, 223, 224, 225, 226, 227, 228]}], "trigger": {"text": "delivers", "tokens": ["delivers"], "offsets": 
[217]}}], "document": ["document", "-", "level", "neural", "machine", "translation", "(", "docnmt", ")", "achieves", "coherent", "translations", "by", "incorporating", "cross", "-", "sentence", "context", ".", "however", ",", "for", "most", "language", "pairs", "there", "\u2019", "s", "a", "shortage", "of", "parallel", "documents", ",", "although", "parallel", "sentences", "are", "readily", "available", ".", "in", "this", "paper", ",", "we", "study", "whether", "and", "how", "contextual", "modeling", "in", "docnmt", "is", "transferable", "via", "multilingual", "modeling", ".", "we", "focus", "on", "the", "scenario", "of", "zero", "-", "shot", "transfer", "from", "teacher", "languages", "with", "document", "level", "data", "to", "student", "languages", "with", "no", "documents", "but", "sentence", "level", "data", ",", "and", "for", "the", "first", "time", "treat", "document", "-", "level", "translation", "as", "a", "transfer", "learning", "problem", ".", "using", "simple", "concatenation", "-", "based", "docnmt", ",", "we", "explore", "the", "effect", "of", "3", "factors", "on", "the", "transfer", ":", "the", "number", "of", "teacher", "languages", "with", "document", "level", "data", ",", "the", "balance", "between", "document", "and", "sentence", "level", "data", "at", "training", ",", "and", "the", "data", "condition", "of", "parallel", "documents", "(", "genuine", "vs", ".", "back", "-", "translated", ")", ".", "our", "experiments", "on", "europarl", "-", "7", "and", "iwslt", "-", "10", "show", "the", "feasibility", "of", "multilingual", "transfer", "for", "docnmt", ",", "particularly", "on", "document", "-", "specific", "metrics", ".", "we", "observe", "that", "more", "teacher", "languages", "and", "adequate", "data", "balance", "both", "contribute", "to", "better", "transfer", "quality", ".", "surprisingly", ",", "the", "transfer", "is", "less", "sensitive", "to", "the", "data", "condition", ",", "where", "multilingual", "docnmt", "delivers", "decent", 
"performance", "with", "either", "back", "-", "translated", "or", "genuine", "document", "pairs", "."]}, {"venue": "ACL", "title": "Modeling Syntactic-Semantic Dependency Correlations in Semantic Role Labeling Using Mixture Models", "abstract": "In this paper, we propose a mixture model-based end-to-end method to model the syntactic-semantic dependency correlation in Semantic Role Labeling (SRL). Semantic dependencies in SRL are modeled as a distribution over semantic dependency labels conditioned on a predicate and an argument word.The semantic label distribution varies depending on Shortest Syntactic Dependency Path (SSDP) hop patterns.We target the variation of semantic label distributions using a mixture model, separately estimating semantic label distributions for different hop patterns and probabilistically clustering hop patterns with similar semantic label distributions.Experiments show that the proposed method successfully learns a cluster assignment reflecting the variation of semantic label distributions.Modeling the variation improves performance in predicting short distance semantic dependencies, in addition to the improvement on long distance semantic dependencies that previous syntax-aware methods have achieved.The proposed method achieves a small but statistically significant improvement over baseline methods in English, German, and Spanish and obtains competitive performance with state-of-the-art methods in English.", "doc_id": "c3c8a2d1551a95cb1294169a94abc3cf", "publication_year": 2022, "sentences": ["in this paper , we propose a mixture model - based end - to - end method to model the syntactic - semantic dependency correlation in semantic role labeling ( srl ) .", "semantic dependencies in srl are modeled as a distribution over semantic dependency labels conditioned on a predicate and an argument word .", "the semantic label distribution varies depending on shortest syntactic dependency path ( ssdp ) hop patterns .", "we target the variation of 
semantic label distributions using a mixture model , separately estimating semantic label distributions for different hop patterns and probabilistically clustering hop patterns with similar semantic label distributions .", "experiments show that the proposed method successfully learns a cluster assignment reflecting the variation of semantic label distributions .", "modeling the variation improves performance in predicting short distance semantic dependencies , in addition to the improvement on long distance semantic dependencies that previous syntax - aware methods have achieved .", "the proposed method achieves a small but statistically significant improvement over baseline methods in english , german , and spanish and obtains competitive performance with state - of - the - art methods in english ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [4]}, {"text": "mixture model - based end - to - end method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixture", "model", "-", "based", "end", "-", "to", "-", "end", "method"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [18]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [5]}}, {"event_type": "PUR", "arguments": [{"text": "syntactic - semantic dependency correlation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["syntactic", "-", "semantic", "dependency", "correlation"], "offsets": [20, 21, 22, 23, 24]}, {"text": "in semantic role labeling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "semantic", "role", "labeling"], "offsets": [25, 26, 27, 28]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [18]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], 
"offsets": [72]}, {"text": "semantic label distributions", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["semantic", "label", "distributions"], "offsets": [87, 88, 89]}], "trigger": {"text": "estimating", "tokens": ["estimating"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "successfully learns", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["successfully", "learns"], "offsets": [111, 112]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "mixture model - based end - to - end method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["mixture", "model", "-", "based", "end", "-", "to", "-", "end", "method"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "cluster assignment", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["cluster", "assignment"], "offsets": [114, 115]}], "trigger": {"text": "successfully learns", "tokens": ["successfully", "learns"], "offsets": [111, 112]}}, {"event_type": "FAC", "arguments": [{"text": "modeling the variation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["modeling", "the", "variation"], "offsets": [124, 125, 126]}, {"text": "performance in predicting short distance semantic dependencies", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "in", "predicting", "short", "distance", "semantic", "dependencies"], "offsets": [128, 129, 130, 131, 132, 133, 134]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [127]}}, {"event_type": "FAC", "arguments": [{"text": "improvement on long distance semantic dependencies", "nugget_type": "STR", "argument_type": "Object", "tokens": ["improvement", "on", "long", "distance", "semantic", "dependencies"], "offsets": [140, 141, 142, 143, 144, 145]}, {"text": "previous syntax - aware methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "syntax", "-", "aware", 
"methods"], "offsets": [147, 148, 149, 150, 151]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [153]}}, {"event_type": "CMP", "arguments": [{"text": "mixture model - based end - to - end method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["mixture", "model", "-", "based", "end", "-", "to", "-", "end", "method"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "small but statistically significant improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["small", "but", "statistically", "significant", "improvement"], "offsets": [160, 161, 162, 163, 164]}, {"text": "baseline methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline", "methods"], "offsets": [166, 167]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [158]}}, {"event_type": "CMP", "arguments": [{"text": "mixture model - based end - to - end method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["mixture", "model", "-", "based", "end", "-", "to", "-", "end", "method"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14, 15, 16]}, {"text": "competitive performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive", "performance"], "offsets": [177, 178]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [180, 181, 182, 183, 184, 185, 186, 187]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [176]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [72]}, {"text": "mixture model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixture", "model"], "offsets": [82, 83]}, {"text": "target", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["target"], "offsets": [73]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": 
[80]}}, {"event_type": "PUR", "arguments": [{"text": "variation of semantic label distributions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["variation", "of", "semantic", "label", "distributions"], "offsets": [75, 76, 77, 78, 79]}], "trigger": {"text": "target", "tokens": ["target"], "offsets": [73]}}, {"event_type": "MDS", "arguments": [{"text": "hop patterns", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hop", "patterns"], "offsets": [97, 98]}, {"text": "similar semantic label distributions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["similar", "semantic", "label", "distributions"], "offsets": [100, 101, 102, 103]}], "trigger": {"text": "probabilistically clustering", "tokens": ["probabilistically", "clustering"], "offsets": [95, 96]}}], "document": ["in", "this", "paper", ",", "we", "propose", "a", "mixture", "model", "-", "based", "end", "-", "to", "-", "end", "method", "to", "model", "the", "syntactic", "-", "semantic", "dependency", "correlation", "in", "semantic", "role", "labeling", "(", "srl", ")", ".", "semantic", "dependencies", "in", "srl", "are", "modeled", "as", "a", "distribution", "over", "semantic", "dependency", "labels", "conditioned", "on", "a", "predicate", "and", "an", "argument", "word", ".", "the", "semantic", "label", "distribution", "varies", "depending", "on", "shortest", "syntactic", "dependency", "path", "(", "ssdp", ")", "hop", "patterns", ".", "we", "target", "the", "variation", "of", "semantic", "label", "distributions", "using", "a", "mixture", "model", ",", "separately", "estimating", "semantic", "label", "distributions", "for", "different", "hop", "patterns", "and", "probabilistically", "clustering", "hop", "patterns", "with", "similar", "semantic", "label", "distributions", ".", "experiments", "show", "that", "the", "proposed", "method", "successfully", "learns", "a", "cluster", "assignment", "reflecting", "the", "variation", "of", "semantic", "label", 
"distributions", ".", "modeling", "the", "variation", "improves", "performance", "in", "predicting", "short", "distance", "semantic", "dependencies", ",", "in", "addition", "to", "the", "improvement", "on", "long", "distance", "semantic", "dependencies", "that", "previous", "syntax", "-", "aware", "methods", "have", "achieved", ".", "the", "proposed", "method", "achieves", "a", "small", "but", "statistically", "significant", "improvement", "over", "baseline", "methods", "in", "english", ",", "german", ",", "and", "spanish", "and", "obtains", "competitive", "performance", "with", "state", "-", "of", "-", "the", "-", "art", "methods", "in", "english", "."]}, {"venue": "ACL", "title": "That is a Known Lie: Detecting Previously Fact-Checked Claims", "abstract": "The recent proliferation of \u201dfake news\u201d has triggered a number of responses, most notably the emergence of several manual fact-checking initiatives. As a result and over time, a large number of fact-checked claims have been accumulated, which increases the likelihood that a new claim in social media or a new statement by a politician might have already been fact-checked by some trusted fact-checking organization, as viral claims often come back after a while in social media, and politicians like to repeat their favorite statements, true or false, over and over again. As manual fact-checking is very time-consuming (and fully automatic fact-checking has credibility issues), it is important to try to save this effort and to avoid wasting time on claims that have already been fact-checked. Interestingly, despite the importance of the task, it has been largely ignored by the research community so far. Here, we aim to bridge this gap. In particular, we formulate the task and we discuss how it relates to, but also differs from, previous work. We further create a specialized dataset, which we release to the research community. 
Finally, we present learning-to-rank experiments that demonstrate sizable improvements over state-of-the-art retrieval and textual similarity approaches.", "doc_id": "8cc33cd55ef4d4f1ec078108288b04cd", "publication_year": 2020, "sentences": ["the recent proliferation of \u201d fake news \u201d has triggered a number of responses , most notably the emergence of several manual fact - checking initiatives .", "as a result and over time , a large number of fact - checked claims have been accumulated , which increases the likelihood that a new claim in social media or a new statement by a politician might have already been fact - checked by some trusted fact - checking organization , as viral claims often come back after a while in social media , and politicians like to repeat their favorite statements , true or false , over and over again .", "as manual fact - checking is very time - consuming ( and fully automatic fact - checking has credibility issues ) , it is important to try to save this effort and to avoid wasting time on claims that have already been fact - checked .", "interestingly , despite the importance of the task , it has been largely ignored by the research community so far .", "here , we aim to bridge this gap .", "in particular , we formulate the task and we discuss how it relates to , but also differs from , previous work .", "we further create a specialized dataset , which we release to the research community .", "finally , we present learning - to - rank experiments that demonstrate sizable improvements over state - of - the - art retrieval and textual similarity approaches ."], "events": [{"event_type": "ITT", "arguments": [{"text": "several manual fact - checking initiatives", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["several", "manual", "fact", "-", "checking", "initiatives"], "offsets": [20, 21, 22, 23, 24, 25]}], "trigger": {"text": "emergence", "tokens": ["emergence"], "offsets": [18]}}, {"event_type": "PRP", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [209]}, {"text": "specialized dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["specialized", "dataset"], "offsets": [213, 214]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [211]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [226]}, {"text": "learning - to - rank experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["learning", "-", "to", "-", "rank", "experiments"], "offsets": [228, 229, 230, 231, 232, 233]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [227]}}, {"event_type": "FIN", "arguments": [{"text": "sizable improvements", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["sizable", "improvements"], "offsets": [236, 237]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [235]}}, {"event_type": "CMP", "arguments": [{"text": "sizable improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["sizable", "improvements"], "offsets": [236, 237]}], "trigger": {"text": "sizable improvements", "tokens": ["sizable", "improvements"], "offsets": [236, 237]}}], "document": ["the", "recent", "proliferation", "of", "\u201d", "fake", "news", "\u201d", "has", "triggered", "a", "number", "of", "responses", ",", "most", "notably", "the", "emergence", "of", "several", "manual", "fact", "-", "checking", "initiatives", ".", "as", "a", "result", "and", "over", "time", ",", "a", "large", "number", "of", "fact", "-", "checked", "claims", "have", "been", "accumulated", ",", "which", "increases", "the", "likelihood", "that", "a", "new", "claim", "in", "social", "media", "or", "a", "new", "statement", "by", "a", "politician", "might", "have", "already", "been", "fact", "-", "checked", "by", "some", "trusted", "fact", "-", "checking", "organization", ",", "as", "viral", 
"claims", "often", "come", "back", "after", "a", "while", "in", "social", "media", ",", "and", "politicians", "like", "to", "repeat", "their", "favorite", "statements", ",", "true", "or", "false", ",", "over", "and", "over", "again", ".", "as", "manual", "fact", "-", "checking", "is", "very", "time", "-", "consuming", "(", "and", "fully", "automatic", "fact", "-", "checking", "has", "credibility", "issues", ")", ",", "it", "is", "important", "to", "try", "to", "save", "this", "effort", "and", "to", "avoid", "wasting", "time", "on", "claims", "that", "have", "already", "been", "fact", "-", "checked", ".", "interestingly", ",", "despite", "the", "importance", "of", "the", "task", ",", "it", "has", "been", "largely", "ignored", "by", "the", "research", "community", "so", "far", ".", "here", ",", "we", "aim", "to", "bridge", "this", "gap", ".", "in", "particular", ",", "we", "formulate", "the", "task", "and", "we", "discuss", "how", "it", "relates", "to", ",", "but", "also", "differs", "from", ",", "previous", "work", ".", "we", "further", "create", "a", "specialized", "dataset", ",", "which", "we", "release", "to", "the", "research", "community", ".", "finally", ",", "we", "present", "learning", "-", "to", "-", "rank", "experiments", "that", "demonstrate", "sizable", "improvements", "over", "state", "-", "of", "-", "the", "-", "art", "retrieval", "and", "textual", "similarity", "approaches", "."]}, {"venue": "ACL", "title": "Comprehensive Study: How the Context Information of Different Granularity Affects Dialogue State Tracking?", "abstract": "Dialogue state tracking (DST) plays a key role in task-oriented dialogue systems to monitor the user\u2019s goal. In general, there are two strategies to track a dialogue state: predicting it from scratch and updating it from previous state. 
The scratch-based strategy obtains each slot value by inquiring all the dialogue history, and the previous-based strategy relies on the current turn dialogue to update the previous dialogue state. However, it is hard for the scratch-based strategy to correctly track short-dependency dialogue state because of noise; meanwhile, the previous-based strategy is not very useful for long-dependency dialogue state tracking. Obviously, it plays different roles for the context information of different granularity to track different kinds of dialogue states. Thus, in this paper, we will study and discuss how the context information of different granularity affects dialogue state tracking. First, we explore how greatly different granularities affect dialogue state tracking. Then, we further discuss how to combine multiple granularities for dialogue state tracking. Finally, we apply the findings about context granularity to few-shot learning scenario. Besides, we have publicly released all codes.", "doc_id": "cbe4f0a5062d29c58fe80f36de99dd92", "publication_year": 2021, "sentences": ["dialogue state tracking ( dst ) plays a key role in task - oriented dialogue systems to monitor the user \u2019 s goal .", "in general , there are two strategies to track a dialogue state : predicting it from scratch and updating it from previous state .", "the scratch - based strategy obtains each slot value by inquiring all the dialogue history , and the previous - based strategy relies on the current turn dialogue to update the previous dialogue state .", "however , it is hard for the scratch - based strategy to correctly track short - dependency dialogue state because of noise ; meanwhile , the previous - based strategy is not very useful for long - dependency dialogue state tracking .", "obviously , it plays different roles for the context information of different granularity to track different kinds of dialogue states .", "thus , in this paper , we will study and discuss how the context 
information of different granularity affects dialogue state tracking .", "first , we explore how greatly different granularities affect dialogue state tracking .", "then , we further discuss how to combine multiple granularities for dialogue state tracking .", "finally , we apply the findings about context granularity to few - shot learning scenario .", "besides , we have publicly released all codes ."], "events": [{"event_type": "ITT", "arguments": [{"text": "task - oriented dialogue systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "-", "oriented", "dialogue", "systems"], "offsets": [11, 12, 13, 14, 15]}], "trigger": {"text": "plays", "tokens": ["plays"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "dialogue history", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["dialogue", "history"], "offsets": [61, 62]}, {"text": "scratch - based strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["scratch", "-", "based", "strategy"], "offsets": [49, 50, 51, 52]}, {"text": "obtains", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtains"], "offsets": [53]}], "trigger": {"text": "inquiring", "tokens": ["inquiring"], "offsets": [58]}}, {"event_type": "PUR", "arguments": [{"text": "slot value", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["slot", "value"], "offsets": [55, 56]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [53]}}, {"event_type": "RWS", "arguments": [{"text": "previous - based strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "-", "based", "strategy"], "offsets": [66, 67, 68, 69]}, {"text": "current turn dialogue", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["current", "turn", "dialogue"], "offsets": [73, 74, 75]}, {"text": "update", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["update"], "offsets": [77]}], "trigger": {"text": "relies on", "tokens": 
["relies", "on"], "offsets": [70, 71]}}, {"event_type": "PUR", "arguments": [{"text": "previous dialogue state", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["previous", "dialogue", "state"], "offsets": [79, 80, 81]}], "trigger": {"text": "update", "tokens": ["update"], "offsets": [77]}}, {"event_type": "RWF", "arguments": [{"text": "scratch - based strategy", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["scratch", "-", "based", "strategy"], "offsets": [90, 91, 92, 93]}, {"text": "correctly track", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["correctly", "track"], "offsets": [95, 96]}, {"text": "hard", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hard"], "offsets": [87]}], "trigger": {"text": "hard", "tokens": ["hard"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "short - dependency dialogue state", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["short", "-", "dependency", "dialogue", "state"], "offsets": [97, 98, 99, 100, 101]}], "trigger": {"text": "correctly track", "tokens": ["correctly", "track"], "offsets": [95, 96]}}, {"event_type": "RWF", "arguments": [{"text": "not very useful", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "very", "useful"], "offsets": [114, 115, 116]}, {"text": "previous - based strategy", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "-", "based", "strategy"], "offsets": [109, 110, 111, 112]}], "trigger": {"text": "not very useful", "tokens": ["not", "very", "useful"], "offsets": [114, 115, 116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "context information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["context", "information"], "offsets": [159, 160]}], "trigger": {"text": "study and discuss", "tokens": ["study", "and", "discuss"], "offsets": [154, 155, 156]}}, {"event_type": 
"WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [171]}, {"text": "different granularities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["different", "granularities"], "offsets": [175, 176]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [172]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [184]}, {"text": "multiple granularities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["multiple", "granularities"], "offsets": [190, 191]}, {"text": "dialogue state tracking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialogue", "state", "tracking"], "offsets": [193, 194, 195]}], "trigger": {"text": "combine", "tokens": ["combine"], "offsets": [189]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [199]}, {"text": "findings about context granularity", "nugget_type": "APP", "argument_type": "Content", "tokens": ["findings", "about", "context", "granularity"], "offsets": [202, 203, 204, 205]}, {"text": "few - shot learning scenario", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["few", "-", "shot", "learning", "scenario"], "offsets": [207, 208, 209, 210, 211]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [200]}}], "document": ["dialogue", "state", "tracking", "(", "dst", ")", "plays", "a", "key", "role", "in", "task", "-", "oriented", "dialogue", "systems", "to", "monitor", "the", "user", "\u2019", "s", "goal", ".", "in", "general", ",", "there", "are", "two", "strategies", "to", "track", "a", "dialogue", "state", ":", "predicting", "it", "from", "scratch", "and", "updating", "it", "from", "previous", "state", ".", "the", "scratch", "-", "based", "strategy", "obtains", "each", "slot", "value", "by", "inquiring", "all", "the", "dialogue", 
"history", ",", "and", "the", "previous", "-", "based", "strategy", "relies", "on", "the", "current", "turn", "dialogue", "to", "update", "the", "previous", "dialogue", "state", ".", "however", ",", "it", "is", "hard", "for", "the", "scratch", "-", "based", "strategy", "to", "correctly", "track", "short", "-", "dependency", "dialogue", "state", "because", "of", "noise", ";", "meanwhile", ",", "the", "previous", "-", "based", "strategy", "is", "not", "very", "useful", "for", "long", "-", "dependency", "dialogue", "state", "tracking", ".", "obviously", ",", "it", "plays", "different", "roles", "for", "the", "context", "information", "of", "different", "granularity", "to", "track", "different", "kinds", "of", "dialogue", "states", ".", "thus", ",", "in", "this", "paper", ",", "we", "will", "study", "and", "discuss", "how", "the", "context", "information", "of", "different", "granularity", "affects", "dialogue", "state", "tracking", ".", "first", ",", "we", "explore", "how", "greatly", "different", "granularities", "affect", "dialogue", "state", "tracking", ".", "then", ",", "we", "further", "discuss", "how", "to", "combine", "multiple", "granularities", "for", "dialogue", "state", "tracking", ".", "finally", ",", "we", "apply", "the", "findings", "about", "context", "granularity", "to", "few", "-", "shot", "learning", "scenario", ".", "besides", ",", "we", "have", "publicly", "released", "all", "codes", "."]}, {"venue": "ACL", "title": "What\u2019s in the Box? An Analysis of Undesirable Content in the Common Crawl Corpus", "abstract": "Whereas much of the success of the current generation of neural language models has been driven by increasingly large training corpora, relatively little research has been dedicated to analyzing these massive sources of textual data. In this exploratory analysis, we delve deeper into the Common Crawl, a colossal web corpus that is extensively used for training language models. 
We find that it contains a significant amount of undesirable content, including hate speech and sexually explicit content, even after filtering procedures. We discuss the potential impacts of this content on language models and conclude with future research directions and a more mindful approach to corpus collection and analysis.", "doc_id": "66a4582efa774a5773a68cdad8b60a0a", "publication_year": 2021, "sentences": ["whereas much of the success of the current generation of neural language models has been driven by increasingly large training corpora , relatively little research has been dedicated to analyzing these massive sources of textual data .", "in this exploratory analysis , we delve deeper into the common crawl , a colossal web corpus that is extensively used for training language models .", "we find that it contains a significant amount of undesirable content , including hate speech and sexually explicit content , even after filtering procedures .", "we discuss the potential impacts of this content on language models and conclude with future research directions and a more mindful approach to corpus collection and analysis ."], "events": [{"event_type": "ITT", "arguments": [{"text": "increasingly large training corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["increasingly", "large", "training", "corpora"], "offsets": [17, 18, 19, 20]}, {"text": "neural language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "language", "models"], "offsets": [10, 11, 12]}], "trigger": {"text": "driven", "tokens": ["driven"], "offsets": [15]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [42]}, {"text": "common crawl", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["common", "crawl"], "offsets": [47, 48]}], "trigger": {"text": "delve", "tokens": ["delve"], "offsets": [43]}}, {"event_type": "FIN", "arguments": [{"text": 
"we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [63]}, {"text": "contains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["contains"], "offsets": [67]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [64]}}, {"event_type": "FAC", "arguments": [{"text": "significant amount", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant", "amount"], "offsets": [69, 70]}, {"text": "undesirable content", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["undesirable", "content"], "offsets": [72, 73]}, {"text": "after filtering procedures", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["after", "filtering", "procedures"], "offsets": [84, 85, 86]}, {"text": "common crawl", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["common", "crawl"], "offsets": [47, 48]}, {"text": "sexually explicit content", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["sexually", "explicit", "content"], "offsets": [79, 80, 81]}, {"text": "hate speech", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["hate", "speech"], "offsets": [76, 77]}], "trigger": {"text": "contains", "tokens": ["contains"], "offsets": [67]}}], "document": ["whereas", "much", "of", "the", "success", "of", "the", "current", "generation", "of", "neural", "language", "models", "has", "been", "driven", "by", "increasingly", "large", "training", "corpora", ",", "relatively", "little", "research", "has", "been", "dedicated", "to", "analyzing", "these", "massive", "sources", "of", "textual", "data", ".", "in", "this", "exploratory", "analysis", ",", "we", "delve", "deeper", "into", "the", "common", "crawl", ",", "a", "colossal", "web", "corpus", "that", "is", "extensively", "used", "for", "training", "language", "models", ".", "we", "find", "that", "it", "contains", "a", "significant", "amount", "of", "undesirable", "content", ",", "including", "hate", "speech", "and", "sexually", "explicit", 
"content", ",", "even", "after", "filtering", "procedures", ".", "we", "discuss", "the", "potential", "impacts", "of", "this", "content", "on", "language", "models", "and", "conclude", "with", "future", "research", "directions", "and", "a", "more", "mindful", "approach", "to", "corpus", "collection", "and", "analysis", "."]}, {"venue": "ACL", "title": "TalkSumm: A Dataset and Scalable Annotation Method for Scientific Paper Summarization Based on Conference Talks", "abstract": "Currently, no large-scale training data is available for the task of scientific paper summarization. In this paper, we propose a novel method that automatically generates summaries for scientific papers, by utilizing videos of talks at scientific conferences. We hypothesize that such talks constitute a coherent and concise description of the papers\u2019 content, and can form the basis for good summaries. We collected 1716 papers and their corresponding videos, and created a dataset of paper summaries. A model trained on this dataset achieves similar performance as models trained on a dataset of summaries created manually. 
In addition, we validated the quality of our summaries by human experts.", "doc_id": "3e24f53f0e1872e8042a9c79924c06db", "publication_year": 2019, "sentences": ["currently , no large - scale training data is available for the task of scientific paper summarization .", "in this paper , we propose a novel method that automatically generates summaries for scientific papers , by utilizing videos of talks at scientific conferences .", "we hypothesize that such talks constitute a coherent and concise description of the papers \u2019 content , and can form the basis for good summaries .", "we collected 1716 papers and their corresponding videos , and created a dataset of paper summaries .", "a model trained on this dataset achieves similar performance as models trained on a dataset of summaries created manually .", "in addition , we validated the quality of our summaries by human experts ."], "events": [{"event_type": "RWF", "arguments": [{"text": "no large - scale training data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["no", "large", "-", "scale", "training", "data"], "offsets": [2, 3, 4, 5, 6, 7]}, {"text": "scientific paper summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["scientific", "paper", "summarization"], "offsets": [14, 15, 16]}], "trigger": {"text": "available", "tokens": ["available"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [22]}, {"text": "novel method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["novel", "method"], "offsets": [25, 26]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [23]}}, {"event_type": "MDS", "arguments": [{"text": "videos of talks at scientific conferences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["videos", "of", "talks", "at", "scientific", "conferences"], "offsets": [37, 38, 39, 40, 41, 42]}, {"text": "automatically 
generates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automatically", "generates"], "offsets": [28, 29]}], "trigger": {"text": "utilizing", "tokens": ["utilizing"], "offsets": [36]}}, {"event_type": "PUR", "arguments": [{"text": "summaries for scientific papers", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["summaries", "for", "scientific", "papers"], "offsets": [30, 31, 32, 33]}], "trigger": {"text": "automatically generates", "tokens": ["automatically", "generates"], "offsets": [28, 29]}}, {"event_type": "CMP", "arguments": [{"text": "similar performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["similar", "performance"], "offsets": [94, 95]}, {"text": "model trained on this dataset", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["model", "trained", "on", "this", "dataset"], "offsets": [88, 89, 90, 91, 92]}, {"text": "models trained on a dataset of summaries created manually", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["models", "trained", "on", "a", "dataset", "of", "summaries", "created", "manually"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [93]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [110]}, {"text": "quality of our summaries", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["quality", "of", "our", "summaries"], "offsets": [113, 114, 115, 116]}, {"text": "by human experts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "human", "experts"], "offsets": [117, 118, 119]}], "trigger": {"text": "validated", "tokens": ["validated"], "offsets": [111]}}], "document": ["currently", ",", "no", "large", "-", "scale", "training", "data", "is", "available", "for", "the", "task", "of", "scientific", "paper", "summarization", ".", "in", "this", "paper", ",", "we", "propose", "a", 
"novel", "method", "that", "automatically", "generates", "summaries", "for", "scientific", "papers", ",", "by", "utilizing", "videos", "of", "talks", "at", "scientific", "conferences", ".", "we", "hypothesize", "that", "such", "talks", "constitute", "a", "coherent", "and", "concise", "description", "of", "the", "papers", "\u2019", "content", ",", "and", "can", "form", "the", "basis", "for", "good", "summaries", ".", "we", "collected", "1716", "papers", "and", "their", "corresponding", "videos", ",", "and", "created", "a", "dataset", "of", "paper", "summaries", ".", "a", "model", "trained", "on", "this", "dataset", "achieves", "similar", "performance", "as", "models", "trained", "on", "a", "dataset", "of", "summaries", "created", "manually", ".", "in", "addition", ",", "we", "validated", "the", "quality", "of", "our", "summaries", "by", "human", "experts", "."]}, {"venue": "ACL", "title": "PENS: A Dataset and Generic Framework for Personalized News Headline Generation", "abstract": "In this paper, we formulate the personalized news headline generation problem whose goal is to output a user-specific title based on both a user\u2019s reading interests and a candidate news body to be exposed to her. To build up a benchmark for this problem, we publicize a large-scale dataset named PENS (PErsonalized News headlineS). The training set is collected from user impressions logs of Microsoft News, and the test set is manually created by hundreds of native speakers to enable a fair testbed for evaluating models in an offline mode. We propose a generic framework as a preparatory solution to our problem. At its heart, user preference is learned by leveraging the user behavioral data, and three kinds of user preference injections are proposed to personalize a text generator and establish personalized headlines. We investigate our dataset by implementing several state-of-the-art user modeling methods in our framework to demonstrate a benchmark score for the proposed dataset. 
The dataset is available at https://msnews.github.io/pens.html.", "doc_id": "ab4aca25633f0a18d30c6d65ce812e09", "publication_year": 2021, "sentences": ["in this paper , we formulate the personalized news headline generation problem whose goal is to output a user - specific title based on both a user \u2019 s reading interests and a candidate news body to be exposed to her .", "to build up a benchmark for this problem , we publicize a large - scale dataset named pens ( personalized news headlines ) .", "the training set is collected from user impressions logs of microsoft news , and the test set is manually created by hundreds of native speakers to enable a fair testbed for evaluating models in an offline mode .", "we propose a generic framework as a preparatory solution to our problem .", "at its heart , user preference is learned by leveraging the user behavioral data , and three kinds of user preference injections are proposed to personalize a text generator and establish personalized headlines .", "we investigate our dataset by implementing several state - of - the - art user modeling methods in our framework to demonstrate a benchmark score for the proposed dataset .", "the dataset is available at https : / / msnews . github . io / pens . 
html ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [4]}, {"text": "personalized news headline generation problem", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["personalized", "news", "headline", "generation", "problem"], "offsets": [7, 8, 9, 10, 11]}, {"text": "output", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["output"], "offsets": [16]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [5]}}, {"event_type": "PUR", "arguments": [{"text": "user - specific title", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["user", "-", "specific", "title"], "offsets": [18, 19, 20, 21]}, {"text": "based on both a user \u2019 s reading interests and a candidate news body to be exposed to her", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "both", "a", "user", "\u2019", "s", "reading", "interests", "and", "a", "candidate", "news", "body", "to", "be", "exposed", "to", "her"], "offsets": [22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40]}], "trigger": {"text": "output", "tokens": ["output"], "offsets": [16]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [51]}, {"text": "build", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["build"], "offsets": [43]}, {"text": "pens", "nugget_type": "DST", "argument_type": "Content", "tokens": ["pens"], "offsets": [59]}], "trigger": {"text": "publicize", "tokens": ["publicize"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "benchmark", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["benchmark"], "offsets": [46]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], 
"offsets": [104]}, {"text": "generic framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["generic", "framework"], "offsets": [107, 108]}, {"text": "preparatory solution to our problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["preparatory", "solution", "to", "our", "problem"], "offsets": [111, 112, 113, 114, 115]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [105]}}, {"event_type": "MDS", "arguments": [{"text": "user behavioral data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["user", "behavioral", "data"], "offsets": [128, 129, 130]}, {"text": "user preference", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["user", "preference"], "offsets": [121, 122]}], "trigger": {"text": "learned", "tokens": ["learned"], "offsets": [124]}}, {"event_type": "MDS", "arguments": [{"text": "personalize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["personalize"], "offsets": [142]}, {"text": "establish", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["establish"], "offsets": [147]}, {"text": "three kinds of user preference injections", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["three", "kinds", "of", "user", "preference", "injections"], "offsets": [133, 134, 135, 136, 137, 138]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [140]}}, {"event_type": "PUR", "arguments": [{"text": "text generator", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["text", "generator"], "offsets": [144, 145]}], "trigger": {"text": "personalize", "tokens": ["personalize"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "personalized headlines", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["personalized", "headlines"], "offsets": [148, 149]}], "trigger": {"text": "establish", "tokens": ["establish"], "offsets": [147]}}, {"event_type": "MDS", "arguments": [{"text": "several state - of 
- the - art user modeling methods", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["several", "state", "-", "of", "-", "the", "-", "art", "user", "modeling", "methods"], "offsets": [157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167]}, {"text": "framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["framework"], "offsets": [170]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [152]}, {"text": "demonstrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["demonstrate"], "offsets": [172]}], "trigger": {"text": "implementing", "tokens": ["implementing"], "offsets": [156]}}, {"event_type": "PUR", "arguments": [{"text": "dataset", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["dataset"], "offsets": [154]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [152]}}, {"event_type": "PUR", "arguments": [{"text": "benchmark score", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["benchmark", "score"], "offsets": [174, 175]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [172]}}], "document": ["in", "this", "paper", ",", "we", "formulate", "the", "personalized", "news", "headline", "generation", "problem", "whose", "goal", "is", "to", "output", "a", "user", "-", "specific", "title", "based", "on", "both", "a", "user", "\u2019", "s", "reading", "interests", "and", "a", "candidate", "news", "body", "to", "be", "exposed", "to", "her", ".", "to", "build", "up", "a", "benchmark", "for", "this", "problem", ",", "we", "publicize", "a", "large", "-", "scale", "dataset", "named", "pens", "(", "personalized", "news", "headlines", ")", ".", "the", "training", "set", "is", "collected", "from", "user", "impressions", "logs", "of", "microsoft", "news", ",", "and", "the", "test", "set", "is", "manually", "created", "by", "hundreds", "of", "native", "speakers", "to", "enable", "a", "fair", 
"testbed", "for", "evaluating", "models", "in", "an", "offline", "mode", ".", "we", "propose", "a", "generic", "framework", "as", "a", "preparatory", "solution", "to", "our", "problem", ".", "at", "its", "heart", ",", "user", "preference", "is", "learned", "by", "leveraging", "the", "user", "behavioral", "data", ",", "and", "three", "kinds", "of", "user", "preference", "injections", "are", "proposed", "to", "personalize", "a", "text", "generator", "and", "establish", "personalized", "headlines", ".", "we", "investigate", "our", "dataset", "by", "implementing", "several", "state", "-", "of", "-", "the", "-", "art", "user", "modeling", "methods", "in", "our", "framework", "to", "demonstrate", "a", "benchmark", "score", "for", "the", "proposed", "dataset", ".", "the", "dataset", "is", "available", "at", "https", ":", "/", "/", "msnews", ".", "github", ".", "io", "/", "pens", ".", "html", "."]}, {"venue": "ACL", "title": "Robustifying Multi-hop QA through Pseudo-Evidentiality Training", "abstract": "This paper studies the bias problem of multi-hop question answering models, of answering correctly without correct reasoning. One way to robustify these models is by supervising to not only answer right, but also with right reasoning chains. An existing direction is to annotate reasoning chains to train models, requiring expensive additional annotations. In contrast, we propose a new approach to learn evidentiality, deciding whether the answer prediction is supported by correct evidences, without such annotations. Instead, we compare counterfactual changes in answer confidence with and without evidence sentences, to generate \u201cpseudo-evidentiality\u201d annotations. 
We validate our proposed model on an original set and challenge set in HotpotQA, showing that our method is accurate and robust in multi-hop reasoning.", "doc_id": "864bd6945de829ad06b2777b9d983f72", "publication_year": 2021, "sentences": ["this paper studies the bias problem of multi - hop question answering models , of answering correctly without correct reasoning .", "one way to robustify these models is by supervising to not only answer right , but also with right reasoning chains .", "an existing direction is to annotate reasoning chains to train models , requiring expensive additional annotations .", "in contrast , we propose a new approach to learn evidentiality , deciding whether the answer prediction is supported by correct evidences , without such annotations .", "instead , we compare counterfactual changes in answer confidence with and without evidence sentences , to generate \u201c pseudo - evidentiality \u201d annotations .", "we validate our proposed model on an original set and challenge set in hotpotqa , showing that our method is accurate and robust in multi - hop reasoning ."], "events": [{"event_type": "WKS", "arguments": [{"text": "bias problem of multi - hop question answering models", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["bias", "problem", "of", "multi", "-", "hop", "question", "answering", "models"], "offsets": [4, 5, 6, 7, 8, 9, 10, 11, 12]}], "trigger": {"text": "studies", "tokens": ["studies"], "offsets": [2]}}, {"event_type": "RWF", "arguments": [], "trigger": {"text": "expensive", "tokens": ["expensive"], "offsets": [56]}}, {"event_type": "RWS", "arguments": [{"text": "answer right", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["answer", "right"], "offsets": [33, 34]}, {"text": "right reasoning chains", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["right", "reasoning", "chains"], "offsets": [39, 40, 41]}], "trigger": {"text": "supervising", "tokens": ["supervising"], 
"offsets": [29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [67]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [69]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": "evidentiality", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["evidentiality"], "offsets": [70]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [69]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "counterfactual changes in answer confidence with and without evidence sentences", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["counterfactual", "changes", "in", "answer", "confidence", "with", "and", "without", "evidence", "sentences"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [103]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "\u201c pseudo - evidentiality \u201d annotations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["\u201c", "pseudo", "-", "evidentiality", "\u201d", "annotations"], "offsets": [104, 105, 106, 107, 108, 109]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [111]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [67]}], "trigger": {"text": "validate", "tokens": ["validate"], "offsets": [112]}}, 
{"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [67]}, {"text": "robust", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["robust"], "offsets": [133]}, {"text": "accurate", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accurate"], "offsets": [131]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [126]}}, {"event_type": "RWS", "arguments": [{"text": "answer right", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["answer", "right"], "offsets": [33, 34]}, {"text": "right reasoning chains", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["right", "reasoning", "chains"], "offsets": [39, 40, 41]}, {"text": "robustify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["robustify"], "offsets": [24]}], "trigger": {"text": "supervising", "tokens": ["supervising"], "offsets": [29]}}, {"event_type": "PUR", "arguments": [{"text": "multi - hop question answering models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["multi", "-", "hop", "question", "answering", "models"], "offsets": [7, 8, 9, 10, 11, 12]}], "trigger": {"text": "robustify", "tokens": ["robustify"], "offsets": [24]}}], "document": ["this", "paper", "studies", "the", "bias", "problem", "of", "multi", "-", "hop", "question", "answering", "models", ",", "of", "answering", "correctly", "without", "correct", "reasoning", ".", "one", "way", "to", "robustify", "these", "models", "is", "by", "supervising", "to", "not", "only", "answer", "right", ",", "but", "also", "with", "right", "reasoning", "chains", ".", "an", "existing", "direction", "is", "to", "annotate", "reasoning", "chains", "to", "train", "models", ",", "requiring", "expensive", "additional", "annotations", ".", "in", "contrast", ",", "we", "propose", "a", "new", "approach", "to", "learn", "evidentiality", ",", "deciding", "whether", "the", "answer", 
"prediction", "is", "supported", "by", "correct", "evidences", ",", "without", "such", "annotations", ".", "instead", ",", "we", "compare", "counterfactual", "changes", "in", "answer", "confidence", "with", "and", "without", "evidence", "sentences", ",", "to", "generate", "\u201c", "pseudo", "-", "evidentiality", "\u201d", "annotations", ".", "we", "validate", "our", "proposed", "model", "on", "an", "original", "set", "and", "challenge", "set", "in", "hotpotqa", ",", "showing", "that", "our", "method", "is", "accurate", "and", "robust", "in", "multi", "-", "hop", "reasoning", "."]}, {"venue": "ACL", "title": "Generating Hierarchical Explanations on Text Classification via Feature Interaction Detection", "abstract": "Generating explanations for neural networks has become crucial for their applications in real-world with respect to reliability and trustworthiness. In natural language processing, existing methods usually provide important features which are words or phrases selected from an input text as an explanation, but ignore the interactions between them. It poses challenges for humans to interpret an explanation and connect it to model prediction. In this work, we build hierarchical explanations by detecting feature interactions. Such explanations visualize how words and phrases are combined at different levels of the hierarchy, which can help users understand the decision-making of black-box models. The proposed method is evaluated with three neural text classifiers (LSTM, CNN, and BERT) on two benchmark datasets, via both automatic and human evaluations. 
Experiments show the effectiveness of the proposed method in providing explanations that are both faithful to models and interpretable to humans.", "doc_id": "1b4c97eb491b79a347f5aebe3db7cc91", "publication_year": 2020, "sentences": ["generating explanations for neural networks has become crucial for their applications in real - world with respect to reliability and trustworthiness .", "in natural language processing , existing methods usually provide important features which are words or phrases selected from an input text as an explanation , but ignore the interactions between them .", "it poses challenges for humans to interpret an explanation and connect it to model prediction .", "in this work , we build hierarchical explanations by detecting feature interactions .", "such explanations visualize how words and phrases are combined at different levels of the hierarchy , which can help users understand the decision - making of black - box models .", "the proposed method is evaluated with three neural text classifiers ( lstm , cnn , and bert ) on two benchmark datasets , via both automatic and human evaluations .", "experiments show the effectiveness of the proposed method in providing explanations that are both faithful to models and interpretable to humans ."], "events": [{"event_type": "ITT", "arguments": [{"text": "in real - world", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "real", "-", "world"], "offsets": [11, 12, 13, 14]}, {"text": "generating explanations for neural networks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["generating", "explanations", "for", "neural", "networks"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [7]}}, {"event_type": "RWS", "arguments": [{"text": "in natural language processing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "natural", "language", "processing"], "offsets": [22, 23, 24, 25]}, {"text": "existing 
methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "methods"], "offsets": [27, 28]}, {"text": "important features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["important", "features"], "offsets": [31, 32]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [27, 28]}, {"text": "in natural language processing", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "natural", "language", "processing"], "offsets": [22, 23, 24, 25]}, {"text": "interactions", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["interactions"], "offsets": [50]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [48]}}, {"event_type": "MDS", "arguments": [{"text": "feature interactions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["feature", "interactions"], "offsets": [80, 81]}], "trigger": {"text": "detecting", "tokens": ["detecting"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["words"], "offsets": [87]}, {"text": "phrases", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["phrases"], "offsets": [89]}, {"text": "at different levels of the hierarchy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "different", "levels", "of", "the", "hierarchy"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [103]}], "trigger": {"text": "combined", "tokens": ["combined"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "decision - making of black - box models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["decision", "-", "making", "of", "black", 
"-", "box", "models"], "offsets": [105, 106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "two benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "benchmark", "datasets"], "offsets": [133, 134, 135]}, {"text": "lstm", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["lstm"], "offsets": [125]}, {"text": "cnn", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["cnn"], "offsets": [127]}, {"text": "bert", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["bert"], "offsets": [130]}, {"text": "automatic evaluations", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["automatic", "evaluations"], "offsets": [139, 142]}, {"text": "human evaluations", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["human", "evaluations"], "offsets": [141, 142]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [118]}}, {"event_type": "FIN", "arguments": [{"text": "faithful", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["faithful"], "offsets": [158]}, {"text": "interpretable", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["interpretable"], "offsets": [162]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["models"], "offsets": [160]}, {"text": "in providing explanations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "providing", "explanations"], "offsets": [152, 153, 154]}, {"text": "effectiveness of the proposed method", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "the", "proposed", "method"], "offsets": [147, 148, 149, 150, 151]}], "trigger": {"text": "faithful", "tokens": ["faithful"], "offsets": 
[158]}}, {"event_type": "FAC", "arguments": [{"text": "in providing explanations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "providing", "explanations"], "offsets": [152, 153, 154]}, {"text": "effectiveness of the proposed method", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "the", "proposed", "method"], "offsets": [147, 148, 149, 150, 151]}], "trigger": {"text": "interpretable", "tokens": ["interpretable"], "offsets": [162]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [74]}, {"text": "hierarchical explanations", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "explanations"], "offsets": [76, 77]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [75]}}], "document": ["generating", "explanations", "for", "neural", "networks", "has", "become", "crucial", "for", "their", "applications", "in", "real", "-", "world", "with", "respect", "to", "reliability", "and", "trustworthiness", ".", "in", "natural", "language", "processing", ",", "existing", "methods", "usually", "provide", "important", "features", "which", "are", "words", "or", "phrases", "selected", "from", "an", "input", "text", "as", "an", "explanation", ",", "but", "ignore", "the", "interactions", "between", "them", ".", "it", "poses", "challenges", "for", "humans", "to", "interpret", "an", "explanation", "and", "connect", "it", "to", "model", "prediction", ".", "in", "this", "work", ",", "we", "build", "hierarchical", "explanations", "by", "detecting", "feature", "interactions", ".", "such", "explanations", "visualize", "how", "words", "and", "phrases", "are", "combined", "at", "different", "levels", "of", "the", "hierarchy", ",", "which", "can", "help", "users", "understand", "the", "decision", "-", "making", "of", "black", "-", "box", "models", ".", "the", "proposed", "method", "is", "evaluated", "with", "three", 
"neural", "text", "classifiers", "(", "lstm", ",", "cnn", ",", "and", "bert", ")", "on", "two", "benchmark", "datasets", ",", "via", "both", "automatic", "and", "human", "evaluations", ".", "experiments", "show", "the", "effectiveness", "of", "the", "proposed", "method", "in", "providing", "explanations", "that", "are", "both", "faithful", "to", "models", "and", "interpretable", "to", "humans", "."]}, {"venue": "ACL", "title": "Fact-based Text Editing", "abstract": "We propose a novel text editing task, referred to as fact-based text editing, in which the goal is to revise a given document to better describe the facts in a knowledge base (e.g., several triples). The task is important in practice because reflecting the truth is a common requirement in text editing. First, we propose a method for automatically generating a dataset for research on fact-based text editing, where each instance consists of a draft text, a revised text, and several facts represented in triples. We apply the method into two public table-to-text datasets, obtaining two new datasets consisting of 233k and 37k instances, respectively. Next, we propose a new neural network architecture for fact-based text editing, called FactEditor, which edits a draft text by referring to given facts using a buffer, a stream, and a memory. A straightforward approach to address the problem would be to employ an encoder-decoder model. Our experimental results on the two datasets show that FactEditor outperforms the encoder-decoder approach in terms of fidelity and fluency. The results also show that FactEditor conducts inference faster than the encoder-decoder approach.", "doc_id": "61bffdec76380030853e2de0b581ff28", "publication_year": 2020, "sentences": ["we propose a novel text editing task , referred to as fact - based text editing , in which the goal is to revise a given document to better describe the facts in a knowledge base ( e . g . 
, several triples ) .", "the task is important in practice because reflecting the truth is a common requirement in text editing .", "first , we propose a method for automatically generating a dataset for research on fact - based text editing , where each instance consists of a draft text , a revised text , and several facts represented in triples .", "we apply the method into two public table - to - text datasets , obtaining two new datasets consisting of 233k and 37k instances , respectively .", "next , we propose a new neural network architecture for fact - based text editing , called facteditor , which edits a draft text by referring to given facts using a buffer , a stream , and a memory .", "a straightforward approach to address the problem would be to employ an encoder - decoder model .", "our experimental results on the two datasets show that facteditor outperforms the encoder - decoder approach in terms of fidelity and fluency .", "the results also show that facteditor conducts inference faster than the encoder - decoder approach ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "text editing task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["text", "editing", "task"], "offsets": [4, 5, 6]}, {"text": "revise", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["revise"], "offsets": [23]}, {"text": "better describe", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "describe"], "offsets": [28, 29]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "given document", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["given", "document"], "offsets": [25, 26]}], "trigger": {"text": "revise", "tokens": ["revise"], "offsets": [23]}}, {"event_type": "PUR", "arguments": [{"text": "facts in a knowledge base", "nugget_type": "FEA", 
"argument_type": "Aim", "tokens": ["facts", "in", "a", "knowledge", "base"], "offsets": [31, 32, 33, 34, 35]}], "trigger": {"text": "better describe", "tokens": ["better", "describe"], "offsets": [28, 29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [69]}, {"text": "automatically generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automatically", "generating"], "offsets": [71, 72]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "dataset", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["dataset"], "offsets": [74]}], "trigger": {"text": "automatically generating", "tokens": ["automatically", "generating"], "offsets": [71, 72]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [104]}, {"text": "two public table - to - text datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "public", "table", "-", "to", "-", "text", "datasets"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116]}, {"text": "method for automatically generating a dataset", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method", "for", "automatically", "generating", "a", "dataset"], "offsets": [69, 70, 71, 72, 73, 74]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [105]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [133]}, {"text": "neural network architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "network", "architecture"], "offsets": [137, 138, 139]}, {"text": "fact - based text editing", "nugget_type": "TAK", "argument_type": "Target", 
"tokens": ["fact", "-", "based", "text", "editing"], "offsets": [141, 142, 143, 144, 145]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [134]}}, {"event_type": "WKS", "arguments": [{"text": "encoder - decoder model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["encoder", "-", "decoder", "model"], "offsets": [183, 184, 185, 186]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [175]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [181]}}, {"event_type": "PUR", "arguments": [{"text": "the problem", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["the", "problem"], "offsets": [176, 177]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [175]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [198]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [195]}}, {"event_type": "CMP", "arguments": [{"text": "two new datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "new", "datasets"], "offsets": [119, 120, 121]}, {"text": "neural network architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "network", "architecture"], "offsets": [137, 138, 139]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [198]}, {"text": "encoder - decoder approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["encoder", "-", "decoder", "approach"], "offsets": [200, 201, 202, 203]}, {"text": "fidelity", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["fidelity"], "offsets": [207]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [198]}}, {"event_type": "FIN", "arguments": [{"text": "conducts", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["conducts"], "offsets": 
[217]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [214]}}, {"event_type": "CMP", "arguments": [{"text": "inference", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["inference"], "offsets": [218]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["faster"], "offsets": [219]}, {"text": "encoder - decoder approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["encoder", "-", "decoder", "approach"], "offsets": [222, 223, 224, 225]}, {"text": "neural network architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "network", "architecture"], "offsets": [137, 138, 139]}], "trigger": {"text": "conducts", "tokens": ["conducts"], "offsets": [217]}}], "document": ["we", "propose", "a", "novel", "text", "editing", "task", ",", "referred", "to", "as", "fact", "-", "based", "text", "editing", ",", "in", "which", "the", "goal", "is", "to", "revise", "a", "given", "document", "to", "better", "describe", "the", "facts", "in", "a", "knowledge", "base", "(", "e", ".", "g", ".", ",", "several", "triples", ")", ".", "the", "task", "is", "important", "in", "practice", "because", "reflecting", "the", "truth", "is", "a", "common", "requirement", "in", "text", "editing", ".", "first", ",", "we", "propose", "a", "method", "for", "automatically", "generating", "a", "dataset", "for", "research", "on", "fact", "-", "based", "text", "editing", ",", "where", "each", "instance", "consists", "of", "a", "draft", "text", ",", "a", "revised", "text", ",", "and", "several", "facts", "represented", "in", "triples", ".", "we", "apply", "the", "method", "into", "two", "public", "table", "-", "to", "-", "text", "datasets", ",", "obtaining", "two", "new", "datasets", "consisting", "of", "233k", "and", "37k", "instances", ",", "respectively", ".", "next", ",", "we", "propose", "a", "new", "neural", "network", "architecture", "for", "fact", "-", "based", "text", "editing", ",", "called", "facteditor", ",", 
"which", "edits", "a", "draft", "text", "by", "referring", "to", "given", "facts", "using", "a", "buffer", ",", "a", "stream", ",", "and", "a", "memory", ".", "a", "straightforward", "approach", "to", "address", "the", "problem", "would", "be", "to", "employ", "an", "encoder", "-", "decoder", "model", ".", "our", "experimental", "results", "on", "the", "two", "datasets", "show", "that", "facteditor", "outperforms", "the", "encoder", "-", "decoder", "approach", "in", "terms", "of", "fidelity", "and", "fluency", ".", "the", "results", "also", "show", "that", "facteditor", "conducts", "inference", "faster", "than", "the", "encoder", "-", "decoder", "approach", "."]}, {"venue": "ACL", "title": "Learning Low-Resource End-To-End Goal-Oriented Dialog for Fast and Reliable System Deployment", "abstract": "Existing end-to-end dialog systems perform less effectively when data is scarce. To obtain an acceptable success in real-life online services with only a handful of training examples, both fast adaptability and reliable performance are highly desirable for dialog systems. In this paper, we propose the Meta-Dialog System (MDS), which combines the advantages of both meta-learning approaches and human-machine collaboration. We evaluate our methods on a new extended-bAbI dataset and a transformed MultiWOZ dataset for low-resource goal-oriented dialog learning. 
Experimental results show that MDS significantly outperforms non-meta-learning baselines and can achieve more than 90% per-turn accuracies with only 10 dialogs on the extended-bAbI dataset.", "doc_id": "4c808773fb225fe6f29fad8fd5680010", "publication_year": 2020, "sentences": ["existing end - to - end dialog systems perform less effectively when data is scarce .", "to obtain an acceptable success in real - life online services with only a handful of training examples , both fast adaptability and reliable performance are highly desirable for dialog systems .", "in this paper , we propose the meta - dialog system ( mds ) , which combines the advantages of both meta - learning approaches and human - machine collaboration .", "we evaluate our methods on a new extended - babi dataset and a transformed multiwoz dataset for low - resource goal - oriented dialog learning .", "experimental results show that mds significantly outperforms non - meta - learning baselines and can achieve more than 90 % per - turn accuracies with only 10 dialogs on the extended - babi dataset ."], "events": [{"event_type": "RWF", "arguments": [{"text": "existing end - to - end dialog systems", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "end", "-", "to", "-", "end", "dialog", "systems"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7]}, {"text": "less effectively", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["less", "effectively"], "offsets": [9, 10]}, {"text": "when data is scarce", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "data", "is", "scarce"], "offsets": [11, 12, 13, 14]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [8]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [52]}, {"text": "meta - dialog system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["meta", "-", "dialog", "system"], "offsets": [55, 56, 57, 
58]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [53]}}, {"event_type": "MDS", "arguments": [{"text": "meta - learning approaches", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["meta", "-", "learning", "approaches"], "offsets": [69, 70, 71, 72]}, {"text": "human - machine collaboration", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["human", "-", "machine", "collaboration"], "offsets": [74, 75, 76, 77]}], "trigger": {"text": "combines", "tokens": ["combines"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [79]}, {"text": "meta - dialog system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["meta", "-", "dialog", "system"], "offsets": [55, 56, 57, 58]}, {"text": "new extended - babi dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["new", "extended", "-", "babi", "dataset"], "offsets": [85, 86, 87, 88, 89]}, {"text": "transformed multiwoz dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["transformed", "multiwoz", "dataset"], "offsets": [92, 93, 94]}, {"text": "low - resource goal - oriented dialog learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["low", "-", "resource", "goal", "-", "oriented", "dialog", "learning"], "offsets": [96, 97, 98, 99, 100, 101, 102, 103]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [80]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [111]}, {"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [120]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [107]}}, {"event_type": "CMP", "arguments": [{"text": "meta - dialog system", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["meta", "-", 
"dialog", "system"], "offsets": [55, 56, 57, 58]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [110]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [111]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [111]}}, {"event_type": "FAC", "arguments": [{"text": "meta - dialog system", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["meta", "-", "dialog", "system"], "offsets": [55, 56, 57, 58]}, {"text": "per - turn accuracies", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["per", "-", "turn", "accuracies"], "offsets": [125, 126, 127, 128]}, {"text": "90 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["90", "%"], "offsets": [123, 124]}, {"text": "extended - babi dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["extended", "-", "babi", "dataset"], "offsets": [135, 136, 137, 138]}, {"text": "with only 10 dialogs", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "only", "10", "dialogs"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [120]}}], "document": ["existing", "end", "-", "to", "-", "end", "dialog", "systems", "perform", "less", "effectively", "when", "data", "is", "scarce", ".", "to", "obtain", "an", "acceptable", "success", "in", "real", "-", "life", "online", "services", "with", "only", "a", "handful", "of", "training", "examples", ",", "both", "fast", "adaptability", "and", "reliable", "performance", "are", "highly", "desirable", "for", "dialog", "systems", ".", "in", "this", "paper", ",", "we", "propose", "the", "meta", "-", "dialog", "system", "(", "mds", ")", ",", "which", "combines", "the", "advantages", "of", "both", "meta", "-", "learning", "approaches", "and", "human", "-", "machine", "collaboration", ".", "we", "evaluate", "our", "methods", "on", "a", 
"new", "extended", "-", "babi", "dataset", "and", "a", "transformed", "multiwoz", "dataset", "for", "low", "-", "resource", "goal", "-", "oriented", "dialog", "learning", ".", "experimental", "results", "show", "that", "mds", "significantly", "outperforms", "non", "-", "meta", "-", "learning", "baselines", "and", "can", "achieve", "more", "than", "90", "%", "per", "-", "turn", "accuracies", "with", "only", "10", "dialogs", "on", "the", "extended", "-", "babi", "dataset", "."]}, {"venue": "ACL", "title": "Hierarchy-aware Label Semantics Matching Network for Hierarchical Text Classification", "abstract": "Hierarchical text classification is an important yet challenging task due to the complex structure of the label hierarchy. Existing methods ignore the semantic relationship between text and labels, so they cannot make full use of the hierarchical information. To this end, we formulate the text-label semantics relationship as a semantic matching problem and thus propose a hierarchy-aware label semantics matching network (HiMatch). First, we project text semantics and label semantics into a joint embedding space. We then introduce a joint embedding loss and a matching learning loss to model the matching relationship between the text semantics and the label semantics. Our model captures the text-label semantics matching relationship among coarse-grained labels and fine-grained labels in a hierarchy-aware manner. 
The experimental results on various benchmark datasets verify that our model achieves state-of-the-art results.", "doc_id": "b5276f9317a0868ba04521612a75cd91", "publication_year": 2021, "sentences": ["hierarchical text classification is an important yet challenging task due to the complex structure of the label hierarchy .", "existing methods ignore the semantic relationship between text and labels , so they cannot make full use of the hierarchical information .", "to this end , we formulate the text - label semantics relationship as a semantic matching problem and thus propose a hierarchy - aware label semantics matching network ( himatch ) .", "first , we project text semantics and label semantics into a joint embedding space .", "we then introduce a joint embedding loss and a matching learning loss to model the matching relationship between the text semantics and the label semantics .", "our model captures the text - label semantics matching relationship among coarse - grained labels and fine - grained labels in a hierarchy - aware manner .", "the experimental results on various benchmark datasets verify that our model achieves state - of - the - art results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "hierarchical text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hierarchical", "text", "classification"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "ignore", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignore"], "offsets": [21]}, {"text": "semantic relationship", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "relationship"], "offsets": [23, 24]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [21]}}, {"event_type": "RWF", "arguments": [{"text": "cannot make full use of", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "make", "full", "use", 
"of"], "offsets": [32, 33, 34, 35, 36]}], "trigger": {"text": "cannot make full use of", "tokens": ["cannot", "make", "full", "use", "of"], "offsets": [32, 33, 34, 35, 36]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, {"text": "hierarchy - aware label semantics matching network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchy", "-", "aware", "label", "semantics", "matching", "network"], "offsets": [62, 63, 64, 65, 66, 67, 68]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [60]}}, {"event_type": "MDS", "arguments": [{"text": "joint embedding space", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["joint", "embedding", "space"], "offsets": [84, 85, 86]}, {"text": "text semantics", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["text", "semantics"], "offsets": [77, 78]}, {"text": "label semantics", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["label", "semantics"], "offsets": [80, 81]}], "trigger": {"text": "project", "tokens": ["project"], "offsets": [76]}}, {"event_type": "MDS", "arguments": [{"text": "joint embedding loss", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["joint", "embedding", "loss"], "offsets": [92, 93, 94]}, {"text": "matching learning loss", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["matching", "learning", "loss"], "offsets": [97, 98, 99]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [101]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "in a hierarchy - aware manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "hierarchy", "-", "aware", "manner"], "offsets": [134, 135, 136, 137, 138, 139]}, {"text": "text - label semantics matching 
relationship", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["text", "-", "label", "semantics", "matching", "relationship"], "offsets": [118, 119, 120, 121, 122, 123]}, {"text": "coarse - grained labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coarse", "-", "grained", "labels"], "offsets": [125, 126, 127, 128]}, {"text": "fine - grained labels", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["fine", "-", "grained", "labels"], "offsets": [130, 131, 132, 133]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [116]}}, {"event_type": "FAC", "arguments": [{"text": "hierarchy - aware label semantics matching network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hierarchy", "-", "aware", "label", "semantics", "matching", "network"], "offsets": [62, 63, 64, 65, 66, 67, 68]}, {"text": "various benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["various", "benchmark", "datasets"], "offsets": [145, 146, 147]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [153, 154, 155, 156, 157, 158, 159, 160]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [152]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [45]}, {"text": "text - label semantics relationship", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["text", "-", "label", "semantics", "relationship"], "offsets": [48, 49, 50, 51, 52]}, {"text": "semantic matching problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "matching", "problem"], "offsets": [55, 56, 57]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "matching relationship", "nugget_type": 
"TAK", "argument_type": "Aim", "tokens": ["matching", "relationship"], "offsets": [103, 104]}, {"text": "between the text semantics and the label semantics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "text", "semantics", "and", "the", "label", "semantics"], "offsets": [105, 106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [101]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [152]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [148]}}], "document": ["hierarchical", "text", "classification", "is", "an", "important", "yet", "challenging", "task", "due", "to", "the", "complex", "structure", "of", "the", "label", "hierarchy", ".", "existing", "methods", "ignore", "the", "semantic", "relationship", "between", "text", "and", "labels", ",", "so", "they", "cannot", "make", "full", "use", "of", "the", "hierarchical", "information", ".", "to", "this", "end", ",", "we", "formulate", "the", "text", "-", "label", "semantics", "relationship", "as", "a", "semantic", "matching", "problem", "and", "thus", "propose", "a", "hierarchy", "-", "aware", "label", "semantics", "matching", "network", "(", "himatch", ")", ".", "first", ",", "we", "project", "text", "semantics", "and", "label", "semantics", "into", "a", "joint", "embedding", "space", ".", "we", "then", "introduce", "a", "joint", "embedding", "loss", "and", "a", "matching", "learning", "loss", "to", "model", "the", "matching", "relationship", "between", "the", "text", "semantics", "and", "the", "label", "semantics", ".", "our", "model", "captures", "the", "text", "-", "label", "semantics", "matching", "relationship", "among", "coarse", "-", "grained", "labels", "and", "fine", "-", "grained", "labels", "in", "a", "hierarchy", "-", "aware", "manner", ".", "the", "experimental", "results", "on", "various", "benchmark", 
"datasets", "verify", "that", "our", "model", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "."]}, {"venue": "ACL", "title": "Are we Estimating or Guesstimating Translation Quality?", "abstract": "Recent advances in pre-trained multilingual language models lead to state-of-the-art results on the task of quality estimation (QE) for machine translation. A carefully engineered ensemble of such models won the QE shared task at WMT19. Our in-depth analysis, however, shows that the success of using pre-trained language models for QE is over-estimated due to three issues we observed in current QE datasets: (i) The distributions of quality scores are imbalanced and skewed towards good quality scores; (iii) QE models can perform well on these datasets while looking at only source or translated sentences; (iii) They contain statistical artifacts that correlate well with human-annotated QE labels. Our findings suggest that although QE models might capture fluency of translated sentences and complexity of source sentences, they cannot model adequacy of translations effectively.", "doc_id": "ec593a2da4ff1e6c6043dcc04d2c6641", "publication_year": 2020, "sentences": ["recent advances in pre - trained multilingual language models lead to state - of - the - art results on the task of quality estimation ( qe ) for machine translation .", "a carefully engineered ensemble of such models won the qe shared task at wmt19 .", "our in - depth analysis , however , shows that the success of using pre - trained language models for qe is over - estimated due to three issues we observed in current qe datasets : ( i )", "the distributions of quality scores are imbalanced and skewed towards good quality scores ; ( iii ) qe models can perform well on these datasets while looking at only source or translated sentences ; ( iii )", "they contain statistical artifacts that correlate well with human - annotated qe labels .", "our findings suggest that although qe models might 
capture fluency of translated sentences and complexity of source sentences , they cannot model adequacy of translations effectively ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained multilingual language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", "multilingual", "language", "models"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [9]}}, {"event_type": "FAC", "arguments": [{"text": "success of using pre - trained language models for qe", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["success", "of", "using", "pre", "-", "trained", "language", "models", "for", "quality", "estimation"], "offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66, 23, 24]}, {"text": "current qe datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["current", "quality", "estimation", "datasets"], "offsets": [79, 23, 24, 81]}], "trigger": {"text": "over - estimated", "tokens": ["over", "-", "estimated"], "offsets": [69, 70, 71]}}, {"event_type": "FAC", "arguments": [{"text": "distributions of quality scores", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["distributions", "of", "quality", "scores"], "offsets": [87, 88, 89, 90]}], "trigger": {"text": "imbalanced", "tokens": ["imbalanced"], "offsets": [92]}}, {"event_type": "FAC", "arguments": [{"text": "distributions of quality scores", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["distributions", "of", "quality", "scores"], "offsets": [87, 88, 89, 90]}, {"text": "good quality scores", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["good", "quality", "scores"], "offsets": [96, 97, 98]}], "trigger": {"text": "skewed towards", "tokens": ["skewed", "towards"], "offsets": [94, 95]}}, {"event_type": "FAC", "arguments": [{"text": "qe models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quality", "estimation", "models"], "offsets": [23, 24, 104]}, 
{"text": "well", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["well"], "offsets": [107]}, {"text": "current qe datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["current", "quality", "estimation", "datasets"], "offsets": [79, 23, 24, 81]}, {"text": "while looking at only source or translated sentences", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "looking", "at", "only", "source", "or", "translated", "sentences"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "qe models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quality", "estimation", "models"], "offsets": [23, 24, 104]}, {"text": "statistical artifacts that correlate well with human - annotated qe labels", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["statistical", "artifacts", "that", "correlate", "well", "with", "human", "-", "annotated", "quality", "estimation", "labels"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132, 133, 23, 24, 135]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [124]}}, {"event_type": "FAC", "arguments": [{"text": "qe models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quality", "estimation", "models"], "offsets": [23, 24, 143]}, {"text": "fluency of translated sentences", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["fluency", "of", "translated", "sentences"], "offsets": [146, 147, 148, 149]}, {"text": "complexity of source sentences", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["complexity", "of", "source", "sentences"], "offsets": [151, 152, 153, 154]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [145]}}, {"event_type": "FAC", "arguments": [{"text": "qe models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["quality", "estimation", "models"], 
"offsets": [23, 24, 143]}, {"text": "adequacy of translations", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["adequacy", "of", "translations"], "offsets": [159, 160, 161]}], "trigger": {"text": "cannot model", "tokens": ["cannot", "model"], "offsets": [157, 158]}}], "document": ["recent", "advances", "in", "pre", "-", "trained", "multilingual", "language", "models", "lead", "to", "state", "-", "of", "-", "the", "-", "art", "results", "on", "the", "task", "of", "quality", "estimation", "(", "qe", ")", "for", "machine", "translation", ".", "a", "carefully", "engineered", "ensemble", "of", "such", "models", "won", "the", "qe", "shared", "task", "at", "wmt19", ".", "our", "in", "-", "depth", "analysis", ",", "however", ",", "shows", "that", "the", "success", "of", "using", "pre", "-", "trained", "language", "models", "for", "qe", "is", "over", "-", "estimated", "due", "to", "three", "issues", "we", "observed", "in", "current", "qe", "datasets", ":", "(", "i", ")", "the", "distributions", "of", "quality", "scores", "are", "imbalanced", "and", "skewed", "towards", "good", "quality", "scores", ";", "(", "iii", ")", "qe", "models", "can", "perform", "well", "on", "these", "datasets", "while", "looking", "at", "only", "source", "or", "translated", "sentences", ";", "(", "iii", ")", "they", "contain", "statistical", "artifacts", "that", "correlate", "well", "with", "human", "-", "annotated", "qe", "labels", ".", "our", "findings", "suggest", "that", "although", "qe", "models", "might", "capture", "fluency", "of", "translated", "sentences", "and", "complexity", "of", "source", "sentences", ",", "they", "cannot", "model", "adequacy", "of", "translations", "effectively", "."]}, {"venue": "ACL", "title": "Robustness Testing of Language Understanding in Task-Oriented Dialog", "abstract": "Most language understanding models in task-oriented dialog systems are trained on a small amount of annotated training data, and evaluated in a small set from the same 
distribution. However, these models can lead to system failure or undesirable output when being exposed to natural language perturbation or variation in practice. In this paper, we conduct comprehensive evaluation and analysis with respect to the robustness of natural language understanding models, and introduce three important aspects related to language understanding in real-world dialog systems, namely, language variety, speech characteristics, and noise perturbation. We propose a model-agnostic toolkit LAUG to approximate natural language perturbations for testing the robustness issues in task-oriented dialog. Four data augmentation approaches covering the three aspects are assembled in LAUG, which reveals critical robustness issues in state-of-the-art models. The augmented dataset through LAUG can be used to facilitate future research on the robustness testing of language understanding in task-oriented dialog.", "doc_id": "7154117bcf0bc3d76f4e333094fbae57", "publication_year": 2021, "sentences": ["most language understanding models in task - oriented dialog systems are trained on a small amount of annotated training data , and evaluated in a small set from the same distribution .", "however , these models can lead to system failure or undesirable output when being exposed to natural language perturbation or variation in practice .", "in this paper , we conduct comprehensive evaluation and analysis with respect to the robustness of natural language understanding models , and introduce three important aspects related to language understanding in real - world dialog systems , namely , language variety , speech characteristics , and noise perturbation .", "we propose a model - agnostic toolkit laug to approximate natural language perturbations for testing the robustness issues in task - oriented dialog .", "four data augmentation approaches covering the three aspects are assembled in laug , which reveals critical robustness issues in state - of - the - art models 
.", "the augmented dataset through laug can be used to facilitate future research on the robustness testing of language understanding in task - oriented dialog ."], "events": [{"event_type": "ITT", "arguments": [{"text": "task - oriented dialog systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "-", "oriented", "dialog", "systems"], "offsets": [5, 6, 7, 8, 9]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [11]}}, {"event_type": "RWF", "arguments": [{"text": "language understanding models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["language", "understanding", "models"], "offsets": [1, 2, 3]}, {"text": "system failure", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["system", "failure"], "offsets": [39, 40]}, {"text": "undesirable output", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["undesirable", "output"], "offsets": [42, 43]}, {"text": "when being exposed to natural language perturbation or variation in practice", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "being", "exposed", "to", "natural", "language", "perturbation", "or", "variation", "in", "practice"], "offsets": [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [37]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "comprehensive evaluation and analysis", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["comprehensive", "evaluation", "and", "analysis"], "offsets": [62, 63, 64, 65]}, {"text": "robustness of natural language understanding models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["robustness", "of", "natural", "language", "understanding", "models"], "offsets": [70, 71, 72, 73, 74, 75]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [61]}}, {"event_type": "WKS", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "in real - world dialog systems", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "real", "-", "world", "dialog", "systems"], "offsets": [86, 87, 88, 89, 90, 91]}, {"text": "language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "understanding"], "offsets": [84, 85]}, {"text": "language variety", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["language", "variety"], "offsets": [95, 96]}, {"text": "speech characteristics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["speech", "characteristics"], "offsets": [98, 99]}, {"text": "noise perturbation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["noise", "perturbation"], "offsets": [102, 103]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "testing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["testing"], "offsets": [119]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [105]}, {"text": "model - agnostic toolkit", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "-", "agnostic", "toolkit"], "offsets": [108, 109, 110, 111]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "robustness issues", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["robustness", "issues"], "offsets": [121, 122]}, {"text": "in task - oriented dialog", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "task", "-", "oriented", "dialog"], "offsets": [123, 124, 125, 126, 127]}], "trigger": {"text": "testing", "tokens": ["testing"], "offsets": [119]}}, {"event_type": "FAC", "arguments": [{"text": "four data augmentation approaches", "nugget_type": "APP", "argument_type": 
"Subject", "tokens": ["four", "data", "augmentation", "approaches"], "offsets": [129, 130, 131, 132]}, {"text": "in laug", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "laug"], "offsets": [139, 140]}, {"text": "reveals", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reveals"], "offsets": [143]}], "trigger": {"text": "assembled", "tokens": ["assembled"], "offsets": [138]}}, {"event_type": "PUR", "arguments": [{"text": "critical robustness issues", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["critical", "robustness", "issues"], "offsets": [144, 145, 146]}, {"text": "in state - of - the - art models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "reveals", "tokens": ["reveals"], "offsets": [143]}}], "document": ["most", "language", "understanding", "models", "in", "task", "-", "oriented", "dialog", "systems", "are", "trained", "on", "a", "small", "amount", "of", "annotated", "training", "data", ",", "and", "evaluated", "in", "a", "small", "set", "from", "the", "same", "distribution", ".", "however", ",", "these", "models", "can", "lead", "to", "system", "failure", "or", "undesirable", "output", "when", "being", "exposed", "to", "natural", "language", "perturbation", "or", "variation", "in", "practice", ".", "in", "this", "paper", ",", "we", "conduct", "comprehensive", "evaluation", "and", "analysis", "with", "respect", "to", "the", "robustness", "of", "natural", "language", "understanding", "models", ",", "and", "introduce", "three", "important", "aspects", "related", "to", "language", "understanding", "in", "real", "-", "world", "dialog", "systems", ",", "namely", ",", "language", "variety", ",", "speech", "characteristics", ",", "and", "noise", "perturbation", ".", "we", "propose", "a", "model", "-", "agnostic", "toolkit", "laug", "to", "approximate", 
"natural", "language", "perturbations", "for", "testing", "the", "robustness", "issues", "in", "task", "-", "oriented", "dialog", ".", "four", "data", "augmentation", "approaches", "covering", "the", "three", "aspects", "are", "assembled", "in", "laug", ",", "which", "reveals", "critical", "robustness", "issues", "in", "state", "-", "of", "-", "the", "-", "art", "models", ".", "the", "augmented", "dataset", "through", "laug", "can", "be", "used", "to", "facilitate", "future", "research", "on", "the", "robustness", "testing", "of", "language", "understanding", "in", "task", "-", "oriented", "dialog", "."]}, {"venue": "ACL", "title": "Boundary Smoothing for Named Entity Recognition", "abstract": "Neural named entity recognition (NER) models may easily encounter the over-confidence issue, which degrades the performance and calibration. Inspired by label smoothing and driven by the ambiguity of boundary annotation in NER engineering, we propose boundary smoothing as a regularization technique for span-based neural NER models. It re-assigns entity probabilities from annotated spans to the surrounding ones. Built on a simple but strong baseline, our model achieves results better than or competitive with previous state-of-the-art systems on eight well-known NER benchmarks. 
Further empirical analysis suggests that boundary smoothing effectively mitigates over-confidence, improves model calibration, and brings flatter neural minima and more smoothed loss landscapes.", "doc_id": "b4123782dafe5299cbb27c4b16df2264", "publication_year": 2022, "sentences": ["neural named entity recognition ( ner ) models may easily encounter the over - confidence issue , which degrades the performance and calibration .", "inspired by label smoothing and driven by the ambiguity of boundary annotation in ner engineering , we propose boundary smoothing as a regularization technique for span - based neural ner models .", "it re - assigns entity probabilities from annotated spans to the surrounding ones .", "built on a simple but strong baseline , our model achieves results better than or competitive with previous state - of - the - art systems on eight well - known ner benchmarks .", "further empirical analysis suggests that boundary smoothing effectively mitigates over - confidence , improves model calibration , and brings flatter neural minima and more smoothed loss landscapes ."], "events": [{"event_type": "RWF", "arguments": [{"text": "neural named entity recognition ( ner ) models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["neural", "named", "entity", "recognition", "models"], "offsets": [0, 1, 2, 3, 7]}, {"text": "over - confidence issue", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["over", "-", "confidence", "issue"], "offsets": [12, 13, 14, 15]}], "trigger": {"text": "encounter", "tokens": ["encounter"], "offsets": [10]}}, {"event_type": "MDS", "arguments": [{"text": "entity probabilities", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["entity", "probabilities"], "offsets": [60, 61]}, {"text": "from annotated spans to the surrounding ones", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "annotated", "spans", "to", "the", "surrounding", "ones"], "offsets": [62, 63, 64, 65, 
66, 67, 68]}], "trigger": {"text": "re - assigns", "tokens": ["re", "-", "assigns"], "offsets": [57, 58, 59]}}, {"event_type": "CMP", "arguments": [{"text": "previous state - of - the - art systems", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "systems"], "offsets": [87, 88, 89, 90, 91, 92, 93, 94, 95]}, {"text": "on eight well - known ner benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "eight", "well", "-", "known", "ner", "benchmarks"], "offsets": [96, 97, 98, 99, 100, 101, 102]}, {"text": "results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["results"], "offsets": [81]}, {"text": "better than or competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "than", "or", "competitive"], "offsets": [82, 83, 84, 85]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [80]}}, {"event_type": "FAC", "arguments": [{"text": "over - confidence", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["over", "-", "confidence"], "offsets": [113, 114, 115]}, {"text": "boundary smoothing", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["boundary", "smoothing"], "offsets": [109, 110]}], "trigger": {"text": "mitigates", "tokens": ["mitigates"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "model calibration", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["model", "calibration"], "offsets": [118, 119]}, {"text": "boundary smoothing", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["boundary", "smoothing"], "offsets": [109, 110]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "neural minima", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["neural", "minima"], "offsets": [124, 125]}, {"text": "loss landscapes", "nugget_type": "FEA", "argument_type": "Object", "tokens": 
["loss", "landscapes"], "offsets": [129, 130]}, {"text": "flatter", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["flatter"], "offsets": [123]}, {"text": "more smoothed", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["more", "smoothed"], "offsets": [127, 128]}, {"text": "boundary smoothing", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["boundary", "smoothing"], "offsets": [109, 110]}], "trigger": {"text": "brings", "tokens": ["brings"], "offsets": [122]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [40]}, {"text": "boundary smoothing", "nugget_type": "APP", "argument_type": "Content", "tokens": ["boundary", "smoothing"], "offsets": [42, 43]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [41]}}], "document": ["neural", "named", "entity", "recognition", "(", "ner", ")", "models", "may", "easily", "encounter", "the", "over", "-", "confidence", "issue", ",", "which", "degrades", "the", "performance", "and", "calibration", ".", "inspired", "by", "label", "smoothing", "and", "driven", "by", "the", "ambiguity", "of", "boundary", "annotation", "in", "ner", "engineering", ",", "we", "propose", "boundary", "smoothing", "as", "a", "regularization", "technique", "for", "span", "-", "based", "neural", "ner", "models", ".", "it", "re", "-", "assigns", "entity", "probabilities", "from", "annotated", "spans", "to", "the", "surrounding", "ones", ".", "built", "on", "a", "simple", "but", "strong", "baseline", ",", "our", "model", "achieves", "results", "better", "than", "or", "competitive", "with", "previous", "state", "-", "of", "-", "the", "-", "art", "systems", "on", "eight", "well", "-", "known", "ner", "benchmarks", ".", "further", "empirical", "analysis", "suggests", "that", "boundary", "smoothing", "effectively", "mitigates", "over", "-", "confidence", ",", "improves", "model", "calibration", ",", "and", "brings", "flatter", 
"neural", "minima", "and", "more", "smoothed", "loss", "landscapes", "."]}, {"venue": "ACL", "title": "Selection Bias Explorations and Debias Methods for Natural Language Sentence Matching Datasets", "abstract": "Natural Language Sentence Matching (NLSM) has gained substantial attention from both academics and the industry, and rich public datasets contribute a lot to this process. However, biased datasets can also hurt the generalization performance of trained models and give untrustworthy evaluation results. For many NLSM datasets, the providers select some pairs of sentences into the datasets, and this sampling procedure can easily bring unintended pattern, i.e., selection bias. One example is the QuoraQP dataset, where some content-independent naive features are unreasonably predictive. Such features are the reflection of the selection bias and termed as the \u201cleakage features.\u201d In this paper, we investigate the problem of selection bias on six NLSM datasets and find that four out of them are significantly biased. We further propose a training and evaluation framework to alleviate the bias. Experimental results on QuoraQP suggest that the proposed framework can improve the generalization ability of trained models, and give more trustworthy evaluation results for real-world adoptions.", "doc_id": "2cf3f07cdcefd7229cc35b49dff61c6f", "publication_year": 2019, "sentences": ["natural language sentence matching ( nlsm ) has gained substantial attention from both academics and the industry , and rich public datasets contribute a lot to this process .", "however , biased datasets can also hurt the generalization performance of trained models and give untrustworthy evaluation results .", "for many nlsm datasets , the providers select some pairs of sentences into the datasets , and this sampling procedure can easily bring unintended pattern , i . e . 
, selection bias .", "one example is the quoraqp dataset , where some content - independent naive features are unreasonably predictive .", "such features are the reflection of the selection bias and termed as the \u201c leakage features . \u201d", "in this paper , we investigate the problem of selection bias on six nlsm datasets and find that four out of them are significantly biased .", "we further propose a training and evaluation framework to alleviate the bias .", "experimental results on quoraqp suggest that the proposed framework can improve the generalization ability of trained models , and give more trustworthy evaluation results for real - world adoptions ."], "events": [{"event_type": "ITT", "arguments": [{"text": "rich public datasets", "nugget_type": "DST", "argument_type": "Target", "tokens": ["rich", "public", "datasets"], "offsets": [19, 20, 21]}], "trigger": {"text": "contribute", "tokens": ["contribute"], "offsets": [22]}}, {"event_type": "RWF", "arguments": [{"text": "generalization performance of trained models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["generalization", "performance", "of", "trained", "models"], "offsets": [37, 38, 39, 40, 41]}, {"text": "biased datasets", "nugget_type": "DST", "argument_type": "Fault", "tokens": ["biased", "datasets"], "offsets": [31, 32]}], "trigger": {"text": "hurt", "tokens": ["hurt"], "offsets": [35]}}, {"event_type": "RWF", "arguments": [{"text": "untrustworthy evaluation results", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["untrustworthy", "evaluation", "results"], "offsets": [44, 45, 46]}, {"text": "biased datasets", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["biased", "datasets"], "offsets": [31, 32]}], "trigger": {"text": "give", "tokens": ["give"], "offsets": [43]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [122]}, {"text": "problem of selection bias on six 
nlsm datasets", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "selection", "bias", "on", "six", "natural", "language", "sentence", "matching", "datasets"], "offsets": [125, 126, 127, 128, 129, 130, 0, 1, 2, 3, 132]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [123]}}, {"event_type": "FIN", "arguments": [{"text": "biased", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["biased"], "offsets": [142]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "four out of them", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["four", "out", "of", "six", "natural", "language", "sentence", "matching", "datasets"], "offsets": [136, 137, 138, 130, 0, 1, 2, 3, 132]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [141]}], "trigger": {"text": "biased", "tokens": ["biased"], "offsets": [142]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [144]}, {"text": "training and evaluation framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["training", "and", "evaluation", "framework"], "offsets": [148, 149, 150, 151]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [153]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [146]}}, {"event_type": "PUR", "arguments": [{"text": "bias", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["bias"], "offsets": [155]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [153]}}, {"event_type": "FIN", "arguments": [{"text": "improve", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improve"], "offsets": [167]}, {"text": "give", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["give"], "offsets": 
[176]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [161]}}, {"event_type": "CMP", "arguments": [{"text": "training and evaluation framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["training", "and", "evaluation", "framework"], "offsets": [148, 149, 150, 151]}, {"text": "improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improve"], "offsets": [167]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [167]}}, {"event_type": "FAC", "arguments": [{"text": "training and evaluation framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["training", "and", "evaluation", "framework"], "offsets": [148, 149, 150, 151]}, {"text": "more trustworthy evaluation results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["more", "trustworthy", "evaluation", "results"], "offsets": [177, 178, 179, 180]}, {"text": "real - world adoptions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["real", "-", "world", "adoptions"], "offsets": [182, 183, 184, 185]}], "trigger": {"text": "give", "tokens": ["give"], "offsets": [176]}}], "document": ["natural", "language", "sentence", "matching", "(", "nlsm", ")", "has", "gained", "substantial", "attention", "from", "both", "academics", "and", "the", "industry", ",", "and", "rich", "public", "datasets", "contribute", "a", "lot", "to", "this", "process", ".", "however", ",", "biased", "datasets", "can", "also", "hurt", "the", "generalization", "performance", "of", "trained", "models", "and", "give", "untrustworthy", "evaluation", "results", ".", "for", "many", "nlsm", "datasets", ",", "the", "providers", "select", "some", "pairs", "of", "sentences", "into", "the", "datasets", ",", "and", "this", "sampling", "procedure", "can", "easily", "bring", "unintended", "pattern", ",", "i", ".", "e", ".", ",", "selection", "bias", ".", "one", "example", "is", "the", "quoraqp", "dataset", ",", "where", "some", "content", "-", "independent", 
"naive", "features", "are", "unreasonably", "predictive", ".", "such", "features", "are", "the", "reflection", "of", "the", "selection", "bias", "and", "termed", "as", "the", "\u201c", "leakage", "features", ".", "\u201d", "in", "this", "paper", ",", "we", "investigate", "the", "problem", "of", "selection", "bias", "on", "six", "nlsm", "datasets", "and", "find", "that", "four", "out", "of", "them", "are", "significantly", "biased", ".", "we", "further", "propose", "a", "training", "and", "evaluation", "framework", "to", "alleviate", "the", "bias", ".", "experimental", "results", "on", "quoraqp", "suggest", "that", "the", "proposed", "framework", "can", "improve", "the", "generalization", "ability", "of", "trained", "models", ",", "and", "give", "more", "trustworthy", "evaluation", "results", "for", "real", "-", "world", "adoptions", "."]}, {"venue": "ACL", "title": "Multi-Task Semantic Dependency Parsing with Policy Gradient for Learning Easy-First Strategies", "abstract": "In Semantic Dependency Parsing (SDP), semantic relations form directed acyclic graphs, rather than trees. We propose a new iterative predicate selection (IPS) algorithm for SDP. Our IPS algorithm combines the graph-based and transition-based parsing approaches in order to handle multiple semantic head words. We train the IPS model using a combination of multi-task learning and task-specific policy gradient training. Trained this way, IPS achieves a new state of the art on the SemEval 2015 Task 18 datasets. 
Furthermore, we observe that policy gradient training learns an easy-first strategy.", "doc_id": "0d0ec1031b6982bd301f190b12628845", "publication_year": 2019, "sentences": ["in semantic dependency parsing ( sdp ) , semantic relations form directed acyclic graphs , rather than trees .\\nwe propose a new iterative predicate selection ( ips ) algorithm for sdp .\\nour ips algorithm combines the graph - based and transition - based parsing approaches in order to handle multiple semantic head words .\\nwe train the ips model using a combination of multi - task learning and task - specific policy gradient training .\\ntrained this way , ips achieves a new state of the art on the semeval 2015 task 18 datasets .\\nfurthermore , we observe that policy gradient training learns an easy - first strategy ."], "events": [{"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [94]}, {"text": "learns", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["learns"], "offsets": [100]}], "trigger": {"text": "observe", "tokens": ["observe"], "offsets": [95]}}, {"event_type": "FAC", "arguments": [{"text": "policy gradient training", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["policy", "gradient", "training"], "offsets": [97, 98, 99]}, {"text": "easy - first strategy", "nugget_type": "APP", "argument_type": "Object", "tokens": ["easy", "-", "first", "strategy"], "offsets": [102, 103, 104, 105]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [100]}}], "document": ["in", "semantic", "dependency", "parsing", "(", "sdp", ")", ",", "semantic", "relations", "form", "directed", "acyclic", "graphs", ",", "rather", "than", "trees", ".\\nwe", "propose", "a", "new", "iterative", "predicate", "selection", "(", "ips", ")", "algorithm", "for", "sdp", ".\\nour", "ips", "algorithm", "combines", "the", "graph", "-", "based", "and", "transition", "-", "based", "parsing", "approaches", 
"in", "order", "to", "handle", "multiple", "semantic", "head", "words", ".\\nwe", "train", "the", "ips", "model", "using", "a", "combination", "of", "multi", "-", "task", "learning", "and", "task", "-", "specific", "policy", "gradient", "training", ".\\ntrained", "this", "way", ",", "ips", "achieves", "a", "new", "state", "of", "the", "art", "on", "the", "semeval", "2015", "task", "18", "datasets", ".\\nfurthermore", ",", "we", "observe", "that", "policy", "gradient", "training", "learns", "an", "easy", "-", "first", "strategy", "."]}, {"venue": "ACL", "title": "BabyWalk: Going Farther in Vision-and-Language Navigation by Taking Baby Steps", "abstract": "Learning to follow instructions is of fundamental importance to autonomous agents for vision-and-language navigation (VLN). In this paper, we study how an agent can navigate long paths when learning from a corpus that consists of shorter ones. We show that existing state-of-the-art agents do not generalize well. To this end, we propose BabyWalk, a new VLN agent that is learned to navigate by decomposing long instructions into shorter ones (BabySteps) and completing them sequentially. A special design memory buffer is used by the agent to turn its past experiences into contexts for future steps. The learning process is composed of two phases. In the first phase, the agent uses imitation learning from demonstration to accomplish BabySteps. In the second phase, the agent uses curriculum-based reinforcement learning to maximize rewards on navigation tasks with increasingly longer instructions. We create two new benchmark datasets (of long navigation tasks) and use them in conjunction with existing ones to examine BabyWalk\u2019s generalization ability. Empirical results show that BabyWalk achieves state-of-the-art results on several metrics, in particular, is able to follow long instructions better. 
The codes and the datasets are released on our project page: https://github.com/Sha-Lab/babywalk.", "doc_id": "b80281059026bd5b999abd56011c1c9a", "publication_year": 2020, "sentences": ["learning to follow instructions is of fundamental importance to autonomous agents for vision - and - language navigation ( vln ) .", "in this paper , we study how an agent can navigate long paths when learning from a corpus that consists of shorter ones .", "we show that existing state - of - the - art agents do not generalize well .", "to this end , we propose babywalk , a new vln agent that is learned to navigate by decomposing long instructions into shorter ones ( babysteps ) and completing them sequentially .", "a special design memory buffer is used by the agent to turn its past experiences into contexts for future steps .", "the learning process is composed of two phases .", "in the first phase , the agent uses imitation learning from demonstration to accomplish babysteps .", "in the second phase , the agent uses curriculum - based reinforcement learning to maximize rewards on navigation tasks with increasingly longer instructions .", "we create two new benchmark datasets ( of long navigation tasks ) and use them in conjunction with existing ones to examine babywalk \u2019 s generalization ability .", "empirical results show that babywalk achieves state - of - the - art results on several metrics , in particular , is able to follow long instructions better .", "the codes and the datasets are released on our project page : https : / / github . 
com / sha - lab / babywalk ."], "events": [{"event_type": "ITT", "arguments": [{"text": "vision - and - language navigation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["vision", "-", "and", "-", "language", "navigation"], "offsets": [12, 13, 14, 15, 16, 17]}], "trigger": {"text": "importance", "tokens": ["importance"], "offsets": [7]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [26]}, {"text": "agent", "nugget_type": "APP", "argument_type": "Content", "tokens": ["agent"], "offsets": [30]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [27]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [46]}, {"text": "not generalize well", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "generalize", "well"], "offsets": [59, 60, 61]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [47]}}, {"event_type": "FAC", "arguments": [{"text": "existing state - of - the - art agents", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "agents"], "offsets": [49, 50, 51, 52, 53, 54, 55, 56, 57]}], "trigger": {"text": "not generalize well", "tokens": ["not", "generalize", "well"], "offsets": [59, 60, 61]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [67]}, {"text": "babywalk", "nugget_type": "APP", "argument_type": "Content", "tokens": ["babywalk"], "offsets": [69]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "long instructions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["long", "instructions"], "offsets": [82, 83]}, {"text": "shorter ones", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": 
["shorter", "ones"], "offsets": [85, 86]}, {"text": "learned", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learned"], "offsets": [77]}], "trigger": {"text": "decomposing", "tokens": ["decomposing"], "offsets": [81]}}, {"event_type": "MDS", "arguments": [{"text": "shorter ones", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["shorter", "ones"], "offsets": [85, 86]}, {"text": "sequentially", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["sequentially"], "offsets": [93]}, {"text": "learned", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learned"], "offsets": [77]}], "trigger": {"text": "completing", "tokens": ["completing"], "offsets": [91]}}, {"event_type": "MDS", "arguments": [{"text": "memory buffer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["memory", "buffer"], "offsets": [98, 99]}, {"text": "turn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["turn"], "offsets": [106]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [101]}}, {"event_type": "PUR", "arguments": [{"text": "past experiences", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["past", "experiences"], "offsets": [108, 109]}, {"text": "into contexts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["into", "contexts"], "offsets": [110, 111]}], "trigger": {"text": "turn", "tokens": ["turn"], "offsets": [106]}}, {"event_type": "MDS", "arguments": [{"text": "accomplish", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["accomplish"], "offsets": [138]}, {"text": "imitation learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["imitation", "learning"], "offsets": [133, 134]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [132]}}, {"event_type": "PUR", "arguments": [{"text": "babysteps", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["babysteps"], "offsets": [139]}], "trigger": {"text": "accomplish", 
"tokens": ["accomplish"], "offsets": [138]}}, {"event_type": "MDS", "arguments": [{"text": "maximize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["maximize"], "offsets": [155]}, {"text": "curriculum - based reinforcement learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["curriculum", "-", "based", "reinforcement", "learning"], "offsets": [149, 150, 151, 152, 153]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [148]}}, {"event_type": "PUR", "arguments": [{"text": "rewards", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["rewards"], "offsets": [156]}], "trigger": {"text": "maximize", "tokens": ["maximize"], "offsets": [155]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [165]}, {"text": "two new benchmark datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "new", "benchmark", "datasets"], "offsets": [167, 168, 169, 170]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [166]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "two new benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "new", "benchmark", "datasets"], "offsets": [167, 168, 169, 170]}, {"text": "in conjunction with existing ones", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "conjunction", "with", "existing", "ones"], "offsets": [180, 181, 182, 183, 184]}, {"text": "examine", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["examine"], "offsets": [186]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [178]}}, {"event_type": "PUR", "arguments": [{"text": "babywalk \u2019 s generalization ability", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["babywalk", "\u2019", "s", "generalization", "ability"], "offsets": 
[187, 188, 189, 190, 191]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [186]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [198]}, {"text": "follow", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["follow"], "offsets": [217]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [195]}}, {"event_type": "FAC", "arguments": [{"text": "babywalk", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["babywalk"], "offsets": [197]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [199, 200, 201, 202, 203, 204, 205, 206]}, {"text": "on several metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "metrics"], "offsets": [207, 208, 209]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [198]}}, {"event_type": "FAC", "arguments": [{"text": "babywalk", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["babywalk"], "offsets": [197]}, {"text": "long instructions", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["long", "instructions"], "offsets": [218, 219]}, {"text": "better", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["better"], "offsets": [220]}], "trigger": {"text": "follow", "tokens": ["follow"], "offsets": [217]}}, {"event_type": "PUR", "arguments": [{"text": "navigate", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["navigate"], "offsets": [79]}], "trigger": {"text": "learned", "tokens": ["learned"], "offsets": [77]}}], "document": ["learning", "to", "follow", "instructions", "is", "of", "fundamental", "importance", "to", "autonomous", "agents", "for", "vision", "-", "and", "-", "language", "navigation", "(", "vln", ")", ".", "in", "this", "paper", ",", "we", "study", "how", "an", "agent", 
"can", "navigate", "long", "paths", "when", "learning", "from", "a", "corpus", "that", "consists", "of", "shorter", "ones", ".", "we", "show", "that", "existing", "state", "-", "of", "-", "the", "-", "art", "agents", "do", "not", "generalize", "well", ".", "to", "this", "end", ",", "we", "propose", "babywalk", ",", "a", "new", "vln", "agent", "that", "is", "learned", "to", "navigate", "by", "decomposing", "long", "instructions", "into", "shorter", "ones", "(", "babysteps", ")", "and", "completing", "them", "sequentially", ".", "a", "special", "design", "memory", "buffer", "is", "used", "by", "the", "agent", "to", "turn", "its", "past", "experiences", "into", "contexts", "for", "future", "steps", ".", "the", "learning", "process", "is", "composed", "of", "two", "phases", ".", "in", "the", "first", "phase", ",", "the", "agent", "uses", "imitation", "learning", "from", "demonstration", "to", "accomplish", "babysteps", ".", "in", "the", "second", "phase", ",", "the", "agent", "uses", "curriculum", "-", "based", "reinforcement", "learning", "to", "maximize", "rewards", "on", "navigation", "tasks", "with", "increasingly", "longer", "instructions", ".", "we", "create", "two", "new", "benchmark", "datasets", "(", "of", "long", "navigation", "tasks", ")", "and", "use", "them", "in", "conjunction", "with", "existing", "ones", "to", "examine", "babywalk", "\u2019", "s", "generalization", "ability", ".", "empirical", "results", "show", "that", "babywalk", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "on", "several", "metrics", ",", "in", "particular", ",", "is", "able", "to", "follow", "long", "instructions", "better", ".", "the", "codes", "and", "the", "datasets", "are", "released", "on", "our", "project", "page", ":", "https", ":", "/", "/", "github", ".", "com", "/", "sha", "-", "lab", "/", "babywalk", "."]}, {"venue": "ACL", "title": "Enhanced Multi-Channel Graph Convolutional Network for Aspect Sentiment Triplet Extraction", "abstract": "Aspect 
Sentiment Triplet Extraction (ASTE) is an emerging sentiment analysis task. Most of the existing studies focus on devising a new tagging scheme that enables the model to extract the sentiment triplets in an end-to-end fashion. However, these methods ignore the relations between words for ASTE task. In this paper, we propose an Enhanced Multi-Channel Graph Convolutional Network model (EMC-GCN) to fully utilize the relations between words. Specifically, we first define ten types of relations for ASTE task, and then adopt a biaffine attention module to embed these relations as an adjacent tensor between words in a sentence. After that, our EMC-GCN transforms the sentence into a multi-channel graph by treating words and the relation adjacent tensor as nodes and edges, respectively. Thus, relation-aware node representations can be learnt. Furthermore, we consider diverse linguistic features to enhance our EMC-GCN model. Finally, we design an effective refining strategy on EMC-GCN for word-pair representation refinement, which considers the implicit results of aspect and opinion extraction when determining whether word pairs match or not. 
Extensive experimental results on the benchmark datasets demonstrate that the effectiveness and robustness of our proposed model, which outperforms state-of-the-art methods significantly.", "doc_id": "a2b4bf83cb4944f6a2835a96c04c2181", "publication_year": 2022, "sentences": ["aspect sentiment triplet extraction ( aste ) is an emerging sentiment analysis task .", "most of the existing studies focus on devising a new tagging scheme that enables the model to extract the sentiment triplets in an end - to - end fashion .", "however , these methods ignore the relations between words for aste task .", "in this paper , we propose an enhanced multi - channel graph convolutional network model ( emc - gcn ) to fully utilize the relations between words .", "specifically , we first define ten types of relations for aste task , and then adopt a biaffine attention module to embed these relations as an adjacent tensor between words in a sentence .", "after that , our emc - gcn transforms the sentence into a multi - channel graph by treating words and the relation adjacent tensor as nodes and edges , respectively .", "thus , relation - aware node representations can be learnt .", "furthermore , we consider diverse linguistic features to enhance our emc - gcn model .", "finally , we design an effective refining strategy on emc - gcn for word - pair representation refinement , which considers the implicit results of aspect and opinion extraction when determining whether word pairs match or not .", "extensive experimental results on the benchmark datasets demonstrate that the effectiveness and robustness of our proposed model , which outperforms state - of - the - art methods significantly ."], "events": [{"event_type": "ITT", "arguments": [{"text": "aspect sentiment triplet extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "sentiment", "triplet", "extraction"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": 
[12]}}, {"event_type": "RWS", "arguments": [{"text": "new tagging scheme", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["new", "tagging", "scheme"], "offsets": [23, 24, 25]}, {"text": "sentiment triplets", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sentiment", "triplets"], "offsets": [33, 34]}, {"text": "in an end - to - end fashion", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "end", "-", "to", "-", "end", "fashion"], "offsets": [35, 36, 37, 38, 39, 40, 41, 42]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [31]}}, {"event_type": "RWF", "arguments": [{"text": "relations between words", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["relations", "between", "words"], "offsets": [50, 51, 52]}, {"text": "aste task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "sentiment", "triplet", "extraction", "task"], "offsets": [0, 1, 2, 3, 55]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [61]}, {"text": "enhanced multi - channel graph convolutional network model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["enhanced", "multi", "-", "channel", "graph", "convolutional", "network", "model"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71]}, {"text": "fully utilize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fully", "utilize"], "offsets": [78, 79]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "relations between words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["relations", "between", "words"], "offsets": [81, 82, 83]}], "trigger": {"text": "fully utilize", "tokens": ["fully", "utilize"], "offsets": [78, 79]}}, {"event_type": "WKS", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [87]}, {"text": "ten types of relations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["ten", "types", "of", "relations"], "offsets": [90, 91, 92, 93]}, {"text": "aste task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["aspect", "sentiment", "triplet", "extraction", "task"], "offsets": [0, 1, 2, 3, 96]}], "trigger": {"text": "define", "tokens": ["define"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "biaffine attention module", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["biaffine", "attention", "module"], "offsets": [102, 103, 104]}, {"text": "adjacent tensor", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["adjacent", "tensor"], "offsets": [111, 112]}, {"text": "ten types of relations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["ten", "types", "of", "relations"], "offsets": [90, 91, 92, 93]}], "trigger": {"text": "embed", "tokens": ["embed"], "offsets": [106]}}, {"event_type": "MDS", "arguments": [{"text": "transforms", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["transforms"], "offsets": [126]}, {"text": "nodes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["nodes"], "offsets": [144]}, {"text": "edges", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["edges"], "offsets": [146]}, {"text": "words", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["words"], "offsets": [137]}, {"text": "relation adjacent tensor", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relation", "adjacent", "tensor"], "offsets": [140, 141, 142]}], "trigger": {"text": "treating", "tokens": ["treating"], "offsets": [136]}}, {"event_type": "PUR", "arguments": [{"text": "sentence", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["sentence"], "offsets": [128]}, {"text": "into a multi - 
channel graph", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["into", "a", "multi", "-", "channel", "graph"], "offsets": [129, 130, 131, 132, 133, 134]}], "trigger": {"text": "transforms", "tokens": ["transforms"], "offsets": [126]}}, {"event_type": "MDS", "arguments": [{"text": "relation - aware node representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relation", "-", "aware", "node", "representations"], "offsets": [152, 153, 154, 155, 156]}], "trigger": {"text": "learnt", "tokens": ["learnt"], "offsets": [159]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "diverse linguistic features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["diverse", "linguistic", "features"], "offsets": [165, 166, 167]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [169]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [164]}}, {"event_type": "PUR", "arguments": [{"text": "emc - gcn model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["enhanced", "multi", "-", "channel", "graph", "convolutional", "network", "model", "model"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71, 174]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [169]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [178]}, {"text": "refining strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["refining", "strategy"], "offsets": [182, 183]}, {"text": "word - pair representation refinement", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["word", "-", "pair", "representation", "refinement"], "offsets": [189, 190, 191, 192, 193]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [179]}}, {"event_type": "MDS", "arguments": 
[{"text": "determining", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["determining"], "offsets": [206]}, {"text": "implicit results", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["implicit", "results"], "offsets": [198, 199]}, {"text": "aspect and opinion extraction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["aspect", "and", "opinion", "extraction"], "offsets": [201, 202, 203, 204]}], "trigger": {"text": "considers", "tokens": ["considers"], "offsets": [196]}}, {"event_type": "PUR", "arguments": [{"text": "whether word pairs match or not", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["whether", "word", "pairs", "match", "or", "not"], "offsets": [207, 208, 209, 210, 211, 212]}], "trigger": {"text": "determining", "tokens": ["determining"], "offsets": [206]}}, {"event_type": "CMP", "arguments": [{"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [234, 235, 236, 237, 238, 239, 240, 241]}, {"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["benchmark", "datasets"], "offsets": [219, 220]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [242]}, {"text": "effectiveness", "nugget_type": "STR", "argument_type": "Result", "tokens": ["effectiveness"], "offsets": [224]}, {"text": "robustness", "nugget_type": "STR", "argument_type": "Result", "tokens": ["robustness"], "offsets": [226]}, {"text": "enhanced multi - channel graph convolutional network model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["enhanced", "multi", "-", "channel", "graph", "convolutional", "network", "model"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [233]}}, {"event_type": "FIN", "arguments": [{"text": 
"outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [233]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [221]}}], "document": ["aspect", "sentiment", "triplet", "extraction", "(", "aste", ")", "is", "an", "emerging", "sentiment", "analysis", "task", ".", "most", "of", "the", "existing", "studies", "focus", "on", "devising", "a", "new", "tagging", "scheme", "that", "enables", "the", "model", "to", "extract", "the", "sentiment", "triplets", "in", "an", "end", "-", "to", "-", "end", "fashion", ".", "however", ",", "these", "methods", "ignore", "the", "relations", "between", "words", "for", "aste", "task", ".", "in", "this", "paper", ",", "we", "propose", "an", "enhanced", "multi", "-", "channel", "graph", "convolutional", "network", "model", "(", "emc", "-", "gcn", ")", "to", "fully", "utilize", "the", "relations", "between", "words", ".", "specifically", ",", "we", "first", "define", "ten", "types", "of", "relations", "for", "aste", "task", ",", "and", "then", "adopt", "a", "biaffine", "attention", "module", "to", "embed", "these", "relations", "as", "an", "adjacent", "tensor", "between", "words", "in", "a", "sentence", ".", "after", "that", ",", "our", "emc", "-", "gcn", "transforms", "the", "sentence", "into", "a", "multi", "-", "channel", "graph", "by", "treating", "words", "and", "the", "relation", "adjacent", "tensor", "as", "nodes", "and", "edges", ",", "respectively", ".", "thus", ",", "relation", "-", "aware", "node", "representations", "can", "be", "learnt", ".", "furthermore", ",", "we", "consider", "diverse", "linguistic", "features", "to", "enhance", "our", "emc", "-", "gcn", "model", ".", "finally", ",", "we", "design", "an", "effective", "refining", "strategy", "on", "emc", "-", "gcn", "for", "word", "-", "pair", "representation", "refinement", ",", "which", "considers", "the", "implicit", "results", "of", "aspect", "and", "opinion", "extraction", "when", "determining", 
"whether", "word", "pairs", "match", "or", "not", ".", "extensive", "experimental", "results", "on", "the", "benchmark", "datasets", "demonstrate", "that", "the", "effectiveness", "and", "robustness", "of", "our", "proposed", "model", ",", "which", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "significantly", "."]}, {"venue": "ACL", "title": "Compositional Semantic Parsing across Graphbanks", "abstract": "Most semantic parsers that map sentences to graph-based meaning representations are hand-designed for specific graphbanks. We present a compositional neural semantic parser which achieves, for the first time, competitive accuracies across a diverse range of graphbanks. Incorporating BERT embeddings and multi-task learning improves the accuracy further, setting new states of the art on DM, PAS, PSD, AMR 2015 and EDS.", "doc_id": "91265be412f7b18129f6790a4fa5e82b", "publication_year": 2019, "sentences": ["most semantic parsers that map sentences to graph - based meaning representations are hand - designed for specific graphbanks .", "we present a compositional neural semantic parser which achieves , for the first time , competitive accuracies across a diverse range of graphbanks .", "incorporating bert embeddings and multi - task learning improves the accuracy further , setting new states of the art on dm , pas , psd , amr 2015 and eds ."], "events": [{"event_type": "ITT", "arguments": [{"text": "specific graphbanks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["specific", "graphbanks"], "offsets": [17, 18]}], "trigger": {"text": "hand - designed", "tokens": ["hand", "-", "designed"], "offsets": [13, 14, 15]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [20]}, {"text": "compositional neural semantic parser", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["compositional", "neural", "semantic", "parser"], "offsets": [23, 24, 25, 
26]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [21]}}, {"event_type": "FAC", "arguments": [{"text": "across a diverse range of graphbanks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "a", "diverse", "range", "of", "graphbanks"], "offsets": [37, 38, 39, 40, 41, 42]}, {"text": "compositional neural semantic parser", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["compositional", "neural", "semantic", "parser"], "offsets": [23, 24, 25, 26]}, {"text": "competitive accuracies", "nugget_type": "STR", "argument_type": "Object", "tokens": ["competitive", "accuracies"], "offsets": [35, 36]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [28]}}, {"event_type": "RWS", "arguments": [{"text": "semantic parsers", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["semantic", "parsers"], "offsets": [1, 2]}, {"text": "sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sentences"], "offsets": [5]}, {"text": "graph - based meaning representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph", "-", "based", "meaning", "representations"], "offsets": [7, 8, 9, 10, 11]}], "trigger": {"text": "map", "tokens": ["map"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "bert embeddings", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bert", "embeddings"], "offsets": [45, 46]}, {"text": "multi - task learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "task", "learning"], "offsets": [48, 49, 50, 51]}, {"text": "improves", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improves"], "offsets": [52]}, {"text": "setting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["setting"], "offsets": [57]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "accuracy", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["accuracy"], "offsets": [54]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [52]}}, {"event_type": "PUR", "arguments": [{"text": "new states of the art", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["new", "states", "of", "the", "art"], "offsets": [58, 59, 60, 61, 62]}, {"text": "on dm , pas , psd , amr 2015 and eds", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "dm", ",", "pas", ",", "psd", ",", "amr", "2015", "and", "eds"], "offsets": [63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73]}], "trigger": {"text": "setting", "tokens": ["setting"], "offsets": [57]}}], "document": ["most", "semantic", "parsers", "that", "map", "sentences", "to", "graph", "-", "based", "meaning", "representations", "are", "hand", "-", "designed", "for", "specific", "graphbanks", ".", "we", "present", "a", "compositional", "neural", "semantic", "parser", "which", "achieves", ",", "for", "the", "first", "time", ",", "competitive", "accuracies", "across", "a", "diverse", "range", "of", "graphbanks", ".", "incorporating", "bert", "embeddings", "and", "multi", "-", "task", "learning", "improves", "the", "accuracy", "further", ",", "setting", "new", "states", "of", "the", "art", "on", "dm", ",", "pas", ",", "psd", ",", "amr", "2015", "and", "eds", "."]}, {"venue": "ACL", "title": "RikiNet: Reading Wikipedia Pages for Natural Question Answering", "abstract": "Reading long documents to answer open-domain questions remains challenging in natural language understanding. In this paper, we introduce a new model, called RikiNet, which reads Wikipedia pages for natural question answering. RikiNet contains a dynamic paragraph dual-attention reader and a multi-level cascaded answer predictor. The reader dynamically represents the document and question by utilizing a set of complementary attention mechanisms. 
The representations are then fed into the predictor to obtain the span of the short answer, the paragraph of the long answer, and the answer type in a cascaded manner. On the Natural Questions (NQ) dataset, a single RikiNet achieves 74.3 F1 and 57.9 F1 on long-answer and short-answer tasks. To our best knowledge, it is the first single model that outperforms the single human performance. Furthermore, an ensemble RikiNet obtains 76.1 F1 and 61.3 F1 on long-answer and short-answer tasks, achieving the best performance on the official NQ leaderboard.", "doc_id": "e067ef77923c0d9466ef24319f80c496", "publication_year": 2020, "sentences": ["reading long documents to answer open - domain questions remains challenging in natural language understanding .", "in this paper , we introduce a new model , called rikinet , which reads wikipedia pages for natural question answering .", "rikinet contains a dynamic paragraph dual - attention reader and a multi - level cascaded answer predictor .", "the reader dynamically represents the document and question by utilizing a set of complementary attention mechanisms .", "the representations are then fed into the predictor to obtain the span of the short answer , the paragraph of the long answer , and the answer type in a cascaded manner .", "on the natural questions ( nq ) dataset , a single rikinet achieves 74 . 3 f1 and 57 . 9 f1 on long - answer and short - answer tasks .", "to our best knowledge , it is the first single model that outperforms the single human performance .", "furthermore , an ensemble rikinet obtains 76 . 1 f1 and 61 . 
3 f1 on long - answer and short - answer tasks , achieving the best performance on the official nq leaderboard ."], "events": [{"event_type": "ITT", "arguments": [{"text": "reading long documents to answer open - domain questions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["reading", "long", "documents", "to", "answer", "open", "-", "domain", "questions"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [20]}, {"text": "rikinet", "nugget_type": "APP", "argument_type": "Content", "tokens": ["rikinet"], "offsets": [27]}, {"text": "reads", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reads"], "offsets": [30]}, {"text": "natural question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "question", "answering"], "offsets": [34, 35, 36]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [21]}}, {"event_type": "PUR", "arguments": [{"text": "wikipedia pages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["wikipedia", "pages"], "offsets": [31, 32]}], "trigger": {"text": "reads", "tokens": ["reads"], "offsets": [30]}}, {"event_type": "MDS", "arguments": [{"text": "rikinet", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["rikinet"], "offsets": [38]}, {"text": "dynamic paragraph dual - attention reader", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["dynamic", "paragraph", "dual", "-", "attention", "reader"], "offsets": [41, 42, 43, 44, 45, 46]}, {"text": "multi - level cascaded answer predictor", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["multi", "-", "level", "cascaded", "answer", "predictor"], "offsets": [49, 50, 51, 52, 53, 54]}], "trigger": {"text": "contains", "tokens": ["contains"], "offsets": [39]}}, 
{"event_type": "MDS", "arguments": [{"text": "reader", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["reader"], "offsets": [57]}, {"text": "document", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["document"], "offsets": [61]}, {"text": "question", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["question"], "offsets": [63]}], "trigger": {"text": "represents", "tokens": ["represents"], "offsets": [59]}}, {"event_type": "MDS", "arguments": [{"text": "set of complementary attention mechanisms", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["set", "of", "complementary", "attention", "mechanisms"], "offsets": [67, 68, 69, 70, 71]}], "trigger": {"text": "utilizing", "tokens": ["utilizing"], "offsets": [65]}}, {"event_type": "MDS", "arguments": [{"text": "representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["representations"], "offsets": [74]}, {"text": "predictor", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["predictor"], "offsets": [80]}, {"text": "obtain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtain"], "offsets": [82]}], "trigger": {"text": "fed", "tokens": ["fed"], "offsets": [77]}}, {"event_type": "PUR", "arguments": [{"text": "span of the short answer", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["span", "of", "the", "short", "answer"], "offsets": [84, 85, 86, 87, 88]}, {"text": "paragraph of the long answer", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["paragraph", "of", "the", "long", "answer"], "offsets": [91, 92, 93, 94, 95]}, {"text": "answer type", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["answer", "type"], "offsets": [99, 100]}, {"text": "in a cascaded manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "cascaded", "manner"], "offsets": [101, 102, 103, 104]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": 
[82]}}, {"event_type": "FAC", "arguments": [{"text": "natural questions ( nq ) dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["natural", "questions", "dataset"], "offsets": [108, 109, 113]}, {"text": "single rikinet", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["single", "rikinet"], "offsets": [116, 117]}, {"text": "74 . 3", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["74", ".", "3"], "offsets": [119, 120, 121]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [122]}, {"text": "long - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["long", "-", "answer", "tasks"], "offsets": [129, 130, 131, 136]}, {"text": "short - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "-", "answer", "tasks"], "offsets": [133, 134, 135, 136]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "natural questions ( nq ) dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["natural", "questions", "dataset"], "offsets": [108, 109, 113]}, {"text": "single rikinet", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["single", "rikinet"], "offsets": [116, 117]}, {"text": "74 . 3 f1", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["74", ".", "3", "f1"], "offsets": [119, 120, 121, 122]}, {"text": "57 . 
9 f1", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["57", ".", "9", "f1"], "offsets": [124, 125, 126, 127]}, {"text": "long - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["long", "-", "answer", "tasks"], "offsets": [129, 130, 131, 136]}, {"text": "short - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "-", "answer", "tasks"], "offsets": [133, 134, 135, 136]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [118]}}, {"event_type": "CMP", "arguments": [{"text": "rikinet", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["rikinet"], "offsets": [27]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [150]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [150]}}, {"event_type": "FAC", "arguments": [{"text": "ensemble rikinet", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["ensemble", "rikinet"], "offsets": [159, 160]}, {"text": "76 . 1", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["76", ".", "1"], "offsets": [162, 163, 164]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [165]}, {"text": "long - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["long", "-", "answer", "tasks"], "offsets": [172, 173, 174, 179]}, {"text": "short - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "-", "answer", "tasks"], "offsets": [176, 177, 178, 179]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "ensemble rikinet", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["ensemble", "rikinet"], "offsets": [159, 160]}, {"text": "61 . 
3", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["61", ".", "3"], "offsets": [167, 168, 169]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1"], "offsets": [170]}, {"text": "long - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["long", "-", "answer", "tasks"], "offsets": [172, 173, 174, 179]}, {"text": "short - answer tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "-", "answer", "tasks"], "offsets": [176, 177, 178, 179]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [161]}}, {"event_type": "CMP", "arguments": [{"text": "ensemble rikinet", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["ensemble", "rikinet"], "offsets": [159, 160]}, {"text": "best", "nugget_type": "STR", "argument_type": "Result", "tokens": ["best"], "offsets": [183]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [184]}, {"text": "on the official nq leaderboard", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "official", "natural", "questions", "leaderboard"], "offsets": [185, 186, 187, 108, 109, 189]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [181]}}], "document": ["reading", "long", "documents", "to", "answer", "open", "-", "domain", "questions", "remains", "challenging", "in", "natural", "language", "understanding", ".", "in", "this", "paper", ",", "we", "introduce", "a", "new", "model", ",", "called", "rikinet", ",", "which", "reads", "wikipedia", "pages", "for", "natural", "question", "answering", ".", "rikinet", "contains", "a", "dynamic", "paragraph", "dual", "-", "attention", "reader", "and", "a", "multi", "-", "level", "cascaded", "answer", "predictor", ".", "the", "reader", "dynamically", "represents", "the", "document", "and", "question", "by", "utilizing", "a", "set", "of", "complementary", "attention", "mechanisms", ".", "the", 
"representations", "are", "then", "fed", "into", "the", "predictor", "to", "obtain", "the", "span", "of", "the", "short", "answer", ",", "the", "paragraph", "of", "the", "long", "answer", ",", "and", "the", "answer", "type", "in", "a", "cascaded", "manner", ".", "on", "the", "natural", "questions", "(", "nq", ")", "dataset", ",", "a", "single", "rikinet", "achieves", "74", ".", "3", "f1", "and", "57", ".", "9", "f1", "on", "long", "-", "answer", "and", "short", "-", "answer", "tasks", ".", "to", "our", "best", "knowledge", ",", "it", "is", "the", "first", "single", "model", "that", "outperforms", "the", "single", "human", "performance", ".", "furthermore", ",", "an", "ensemble", "rikinet", "obtains", "76", ".", "1", "f1", "and", "61", ".", "3", "f1", "on", "long", "-", "answer", "and", "short", "-", "answer", "tasks", ",", "achieving", "the", "best", "performance", "on", "the", "official", "nq", "leaderboard", "."]}, {"venue": "ACL", "title": "Exploiting Personal Characteristics of Debaters for Predicting Persuasiveness", "abstract": "Predicting the persuasiveness of arguments has applications as diverse as writing assistance, essay scoring, and advertising. While clearly relevant to the task, the personal characteristics of an argument\u2019s source and audience have not yet been fully exploited toward automated persuasiveness prediction. In this paper, we model debaters\u2019 prior beliefs, interests, and personality traits based on their previous activity, without dependence on explicit user profiles or questionnaires. 
Using a dataset of over 60,000 argumentative discussions, comprising more than three million individual posts collected from the subreddit r/ChangeMyView, we demonstrate that our modeling of debater\u2019s characteristics enhances the prediction of argument persuasiveness as well as of debaters\u2019 resistance to persuasion.", "doc_id": "acc640f88096af9803d8676df3148436", "publication_year": 2020, "sentences": ["predicting the persuasiveness of arguments has applications as diverse as writing assistance , essay scoring , and advertising .", "while clearly relevant to the task , the personal characteristics of an argument \u2019 s source and audience have not yet been fully exploited toward automated persuasiveness prediction .", "in this paper , we model debaters \u2019 prior beliefs , interests , and personality traits based on their previous activity , without dependence on explicit user profiles or questionnaires .", "using a dataset of over 60 , 000 argumentative discussions , comprising more than three million individual posts collected from the subreddit r / changemyview , we demonstrate that our modeling of debater \u2019 s characteristics enhances the prediction of argument persuasiveness as well as of debaters \u2019 resistance to persuasion ."], "events": [{"event_type": "ITT", "arguments": [{"text": "predicting the persuasiveness of arguments", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["predicting", "the", "persuasiveness", "of", "arguments"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "applications", "tokens": ["applications"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "not yet been fully exploited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "yet", "been", "fully", "exploited"], "offsets": [38, 39, 40, 41, 42]}, {"text": "predicting the persuasiveness of arguments", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["predicting", "the", "persuasiveness", "of", "arguments"], 
"offsets": [0, 1, 2, 3, 4]}, {"text": "personal characteristics of an argument \u2019 s source", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["personal", "characteristics", "of", "an", "argument", "\u2019", "s", "source"], "offsets": [27, 28, 29, 30, 31, 32, 33, 34]}], "trigger": {"text": "not yet been fully exploited", "tokens": ["not", "yet", "been", "fully", "exploited"], "offsets": [38, 39, 40, 41, 42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [52]}, {"text": "debaters \u2019 prior beliefs", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["debaters", "\u2019", "prior", "beliefs"], "offsets": [54, 55, 56, 57]}, {"text": "interests", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["interests"], "offsets": [59]}, {"text": "personality traits", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["personality", "traits"], "offsets": [62, 63]}, {"text": "based on their previous activity , without dependence on explicit user", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "their", "previous", "activity", ",", "without", "dependence", "on", "explicit", "user"], "offsets": [64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74]}, {"text": "explicit user profiles", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["explicit", "user", "profiles"], "offsets": [73, 74, 75]}, {"text": "explicit user questionnaires", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["explicit", "user", "questionnaires"], "offsets": [73, 74, 77]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [53]}}, {"event_type": "WKS", "arguments": [{"text": "dataset of over 60 , 000 argumentative discussions", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "of", "over", "60", ",", "000", "argumentative", "discussions"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88]}], "trigger": 
{"text": "using", "tokens": ["using"], "offsets": [79]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [105]}, {"text": "enhances", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["enhances"], "offsets": [115]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "modeling of debater \u2019 s characteristics", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["modeling", "of", "debater", "\u2019", "s", "characteristics"], "offsets": [109, 110, 111, 112, 113, 114]}, {"text": "prediction of argument persuasiveness", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["prediction", "of", "argument", "persuasiveness"], "offsets": [117, 118, 119, 120]}, {"text": "debaters \u2019 resistance to persuasion", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["debaters", "\u2019", "resistance", "to", "persuasion"], "offsets": [125, 126, 127, 128, 129]}], "trigger": {"text": "enhances", "tokens": ["enhances"], "offsets": [115]}}], "document": ["predicting", "the", "persuasiveness", "of", "arguments", "has", "applications", "as", "diverse", "as", "writing", "assistance", ",", "essay", "scoring", ",", "and", "advertising", ".", "while", "clearly", "relevant", "to", "the", "task", ",", "the", "personal", "characteristics", "of", "an", "argument", "\u2019", "s", "source", "and", "audience", "have", "not", "yet", "been", "fully", "exploited", "toward", "automated", "persuasiveness", "prediction", ".", "in", "this", "paper", ",", "we", "model", "debaters", "\u2019", "prior", "beliefs", ",", "interests", ",", "and", "personality", "traits", "based", "on", "their", "previous", "activity", ",", "without", "dependence", "on", "explicit", "user", "profiles", "or", "questionnaires", ".", "using", "a", "dataset", "of", "over", "60", ",", "000", "argumentative", "discussions", ",", 
"comprising", "more", "than", "three", "million", "individual", "posts", "collected", "from", "the", "subreddit", "r", "/", "changemyview", ",", "we", "demonstrate", "that", "our", "modeling", "of", "debater", "\u2019", "s", "characteristics", "enhances", "the", "prediction", "of", "argument", "persuasiveness", "as", "well", "as", "of", "debaters", "\u2019", "resistance", "to", "persuasion", "."]}, {"venue": "ACL", "title": "Extracting Headless MWEs from Dependency Parse Trees: Parsing, Tagging, and Joint Modeling Approaches", "abstract": "An interesting and frequent type of multi-word expression (MWE) is the headless MWE, for which there are no true internal syntactic dominance relations; examples include many named entities (\u201cWells Fargo\u201d) and dates (\u201cJuly 5, 2020\u201d) as well as certain productive constructions (\u201cblow for blow\u201d, \u201cday after day\u201d). Despite their special status and prevalence, current dependency-annotation schemes require treating such flat structures as if they had internal syntactic heads, and most current parsers handle them in the same fashion as headed constructions. Meanwhile, outside the context of parsing, taggers are typically used for identifying MWEs, but taggers might benefit from structural information. We empirically compare these two common strategies\u2014parsing and tagging\u2014for predicting flat MWEs. Additionally, we propose an efficient joint decoding algorithm that combines scores from both strategies. 
Experimental results on the MWE-Aware English Dependency Corpus and on six non-English dependency treebanks with frequent flat structures show that: (1) tagging is more accurate than parsing for identifying flat-structure MWEs, (2) our joint decoder reconciles the two different views and, for non-BERT features, leads to higher accuracies, and (3) most of the gains result from feature sharing between the parsers and taggers.", "doc_id": "5adf6959f4a30421c1a17da044a6a87c", "publication_year": 2020, "sentences": ["an interesting and frequent type of multi - word expression ( mwe ) is the headless mwe , for which there are no true internal syntactic dominance relations ; examples include many named entities ( \u201c wells fargo \u201d ) and dates ( \u201c july 5 , 2020 \u201d ) as well as certain productive constructions ( \u201c blow for blow \u201d , \u201c day after day \u201d ) .", "despite their special status and prevalence , current dependency - annotation schemes require treating such flat structures as if they had internal syntactic heads , and most current parsers handle them in the same fashion as headed constructions .", "meanwhile , outside the context of parsing , taggers are typically used for identifying mwes , but taggers might benefit from structural information .", "we empirically compare these two common strategies \u2014 parsing and tagging \u2014 for predicting flat mwes .", "additionally , we propose an efficient joint decoding algorithm that combines scores from both strategies .", "experimental results on the mwe - aware english dependency corpus and on six non - english dependency treebanks with frequent flat structures show that : ( 1 ) tagging is more accurate than parsing for identifying flat - structure mwes , ( 2 ) our joint decoder reconciles the two different views and , for non - bert features , leads to higher accuracies , and ( 3 ) most of the gains result from feature sharing between the parsers and taggers ."], "events": 
[{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [133]}, {"text": "parsing", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["parsing"], "offsets": [141]}, {"text": "tagging", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["tagging"], "offsets": [143]}, {"text": "predicting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predicting"], "offsets": [146]}], "trigger": {"text": "empirically compare", "tokens": ["empirically", "compare"], "offsets": [134, 135]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [152]}, {"text": "joint decoding algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["joint", "decoding", "algorithm"], "offsets": [156, 157, 158]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [153]}}, {"event_type": "CMP", "arguments": [{"text": "more accurate", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "accurate"], "offsets": [196, 197]}], "trigger": {"text": "more accurate", "tokens": ["more", "accurate"], "offsets": [196, 197]}}, {"event_type": "FAC", "arguments": [{"text": "two different views", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["two", "different", "views"], "offsets": [215, 216, 217]}, {"text": "mwe - aware english dependency corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mwe", "-", "aware", "english", "dependency", "corpus"], "offsets": [170, 171, 172, 173, 174, 175]}, {"text": "six non - english dependency treebanks with frequent flat structures", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["six", "non", "-", "english", "dependency", "treebanks", "with", "frequent", "flat", "structures"], "offsets": [178, 179, 180, 181, 182, 183, 184, 185, 186, 187]}, {"text": "joint decoder", "nugget_type": "MOD", "argument_type": 
"Subject", "tokens": ["joint", "decoder"], "offsets": [211, 212]}], "trigger": {"text": "reconciles", "tokens": ["reconciles"], "offsets": [213]}}, {"event_type": "FAC", "arguments": [{"text": "mwe - aware english dependency corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mwe", "-", "aware", "english", "dependency", "corpus"], "offsets": [170, 171, 172, 173, 174, 175]}, {"text": "six non - english dependency treebanks with frequent flat structures", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["six", "non", "-", "english", "dependency", "treebanks", "with", "frequent", "flat", "structures"], "offsets": [178, 179, 180, 181, 182, 183, 184, 185, 186, 187]}, {"text": "joint decoder", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["joint", "decoder"], "offsets": [211, 212]}, {"text": "higher accuracies", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["higher", "accuracies"], "offsets": [228, 229]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [226]}}, {"event_type": "FAC", "arguments": [{"text": "gains", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["gains"], "offsets": [238]}, {"text": "feature sharing between the parsers and taggers", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["feature", "sharing", "between", "the", "parsers", "and", "taggers"], "offsets": [241, 242, 243, 244, 245, 246, 247]}], "trigger": {"text": "result", "tokens": ["result"], "offsets": [239]}}, {"event_type": "PUR", "arguments": [{"text": "flat mwes", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["flat", "multi", "-", "word", "expression"], "offsets": [147, 6, 7, 8, 9]}], "trigger": {"text": "predicting", "tokens": ["predicting"], "offsets": [146]}}, {"event_type": "FIN", "arguments": [{"text": "more accurate", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "accurate"], "offsets": [196, 197]}, {"text": "reconciles", "nugget_type": "E-FAC", 
"argument_type": "Content", "tokens": ["reconciles"], "offsets": [213]}, {"text": "result", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["result"], "offsets": [239]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [188]}}], "document": ["an", "interesting", "and", "frequent", "type", "of", "multi", "-", "word", "expression", "(", "mwe", ")", "is", "the", "headless", "mwe", ",", "for", "which", "there", "are", "no", "true", "internal", "syntactic", "dominance", "relations", ";", "examples", "include", "many", "named", "entities", "(", "\u201c", "wells", "fargo", "\u201d", ")", "and", "dates", "(", "\u201c", "july", "5", ",", "2020", "\u201d", ")", "as", "well", "as", "certain", "productive", "constructions", "(", "\u201c", "blow", "for", "blow", "\u201d", ",", "\u201c", "day", "after", "day", "\u201d", ")", ".", "despite", "their", "special", "status", "and", "prevalence", ",", "current", "dependency", "-", "annotation", "schemes", "require", "treating", "such", "flat", "structures", "as", "if", "they", "had", "internal", "syntactic", "heads", ",", "and", "most", "current", "parsers", "handle", "them", "in", "the", "same", "fashion", "as", "headed", "constructions", ".", "meanwhile", ",", "outside", "the", "context", "of", "parsing", ",", "taggers", "are", "typically", "used", "for", "identifying", "mwes", ",", "but", "taggers", "might", "benefit", "from", "structural", "information", ".", "we", "empirically", "compare", "these", "two", "common", "strategies", "\u2014", "parsing", "and", "tagging", "\u2014", "for", "predicting", "flat", "mwes", ".", "additionally", ",", "we", "propose", "an", "efficient", "joint", "decoding", "algorithm", "that", "combines", "scores", "from", "both", "strategies", ".", "experimental", "results", "on", "the", "mwe", "-", "aware", "english", "dependency", "corpus", "and", "on", "six", "non", "-", "english", "dependency", "treebanks", "with", "frequent", "flat", "structures", "show", "that", ":", "(", 
"1", ")", "tagging", "is", "more", "accurate", "than", "parsing", "for", "identifying", "flat", "-", "structure", "mwes", ",", "(", "2", ")", "our", "joint", "decoder", "reconciles", "the", "two", "different", "views", "and", ",", "for", "non", "-", "bert", "features", ",", "leads", "to", "higher", "accuracies", ",", "and", "(", "3", ")", "most", "of", "the", "gains", "result", "from", "feature", "sharing", "between", "the", "parsers", "and", "taggers", "."]}, {"venue": "ACL", "title": "Evaluation of Thematic Coherence in Microblogs", "abstract": "Collecting together microblogs representing opinions about the same topics within the same timeframe is useful to a number of different tasks and practitioners. A major question is how to evaluate the quality of such thematic clusters. Here we create a corpus of microblog clusters from three different domains and time windows and define the task of evaluating thematic coherence. We provide annotation guidelines and human annotations of thematic coherence by journalist experts. We subsequently investigate the efficacy of different automated evaluation metrics for the task. We consider a range of metrics including surface level metrics, ones for topic model coherence and text generation metrics (TGMs). While surface level metrics perform well, outperforming topic coherence metrics, they are not as consistent as TGMs. 
TGMs are more reliable than all other metrics considered for capturing thematic coherence in microblog clusters due to being less sensitive to the effect of time windows.", "doc_id": "dd70b5ee8cbd447d59cd374930029e5c", "publication_year": 2021, "sentences": ["collecting together microblogs representing opinions about the same topics within the same timeframe is useful to a number of different tasks and practitioners .", "a major question is how to evaluate the quality of such thematic clusters .", "here we create a corpus of microblog clusters from three different domains and time windows and define the task of evaluating thematic coherence .", "we provide annotation guidelines and human annotations of thematic coherence by journalist experts .", "we subsequently investigate the efficacy of different automated evaluation metrics for the task .", "we consider a range of metrics including surface level metrics , ones for topic model coherence and text generation metrics ( tgms ) .", "while surface level metrics perform well , outperforming topic coherence metrics , they are not as consistent as tgms .", "tgms are more reliable than all other metrics considered for capturing thematic coherence in microblog clusters due to being less sensitive to the effect of time windows ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [39]}, {"text": "corpus of microblog clusters", "nugget_type": "DST", "argument_type": "Content", "tokens": ["corpus", "of", "microblog", "clusters"], "offsets": [42, 43, 44, 45]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [40]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [39]}, {"text": "evaluating thematic coherence", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["evaluating", "thematic", "coherence"], "offsets": [58, 59, 60]}], 
"trigger": {"text": "define", "tokens": ["define"], "offsets": [54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [62]}, {"text": "annotation guidelines", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["annotation", "guidelines"], "offsets": [64, 65]}, {"text": "human annotations", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["human", "annotations"], "offsets": [67, 68]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [63]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "efficacy of different automated evaluation metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["efficacy", "of", "different", "automated", "evaluation", "metrics"], "offsets": [80, 81, 82, 83, 84, 85]}, {"text": "evaluating thematic coherence", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["evaluating", "thematic", "coherence"], "offsets": [58, 59, 60]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [78]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [90]}, {"text": "surface level metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["surface", "level", "metrics"], "offsets": [97, 98, 99]}, {"text": "ones for topic model coherence", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ones", "for", "topic", "model", "coherence"], "offsets": [101, 102, 103, 104, 105]}, {"text": "text generation metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["text", "generation", "metrics"], "offsets": [107, 108, 109]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [91]}}, {"event_type": "CMP", "arguments": [], "trigger": {"text": "outperforming", "tokens": 
["outperforming"], "offsets": [121]}}], "document": ["collecting", "together", "microblogs", "representing", "opinions", "about", "the", "same", "topics", "within", "the", "same", "timeframe", "is", "useful", "to", "a", "number", "of", "different", "tasks", "and", "practitioners", ".", "a", "major", "question", "is", "how", "to", "evaluate", "the", "quality", "of", "such", "thematic", "clusters", ".", "here", "we", "create", "a", "corpus", "of", "microblog", "clusters", "from", "three", "different", "domains", "and", "time", "windows", "and", "define", "the", "task", "of", "evaluating", "thematic", "coherence", ".", "we", "provide", "annotation", "guidelines", "and", "human", "annotations", "of", "thematic", "coherence", "by", "journalist", "experts", ".", "we", "subsequently", "investigate", "the", "efficacy", "of", "different", "automated", "evaluation", "metrics", "for", "the", "task", ".", "we", "consider", "a", "range", "of", "metrics", "including", "surface", "level", "metrics", ",", "ones", "for", "topic", "model", "coherence", "and", "text", "generation", "metrics", "(", "tgms", ")", ".", "while", "surface", "level", "metrics", "perform", "well", ",", "outperforming", "topic", "coherence", "metrics", ",", "they", "are", "not", "as", "consistent", "as", "tgms", ".", "tgms", "are", "more", "reliable", "than", "all", "other", "metrics", "considered", "for", "capturing", "thematic", "coherence", "in", "microblog", "clusters", "due", "to", "being", "less", "sensitive", "to", "the", "effect", "of", "time", "windows", "."]}, {"venue": "ACL", "title": "Language Model Evaluation Beyond Perplexity", "abstract": "We propose an alternate approach to quantifying how well language models learn natural language: we ask how well they match the statistical tendencies of natural language. To answer this question, we analyze whether text generated from language models exhibits the statistical tendencies present in the human-generated text on which they were trained. 
We provide a framework\u2013paired with significance tests\u2013for evaluating the fit of language models to these trends. We find that neural language models appear to learn only a subset of the tendencies considered, but align much more closely with empirical trends than proposed theoretical distributions (when present). Further, the fit to different distributions is highly-dependent on both model architecture and generation strategy. As concrete examples, text generated under the nucleus sampling scheme adheres more closely to the type\u2013token relationship of natural language than text produced using standard ancestral sampling; text from LSTMs reflects the natural language distributions over length, stopwords, and symbols surprisingly well.", "doc_id": "5620a076bac6ded51f9e5002ffcc3cee", "publication_year": 2021, "sentences": ["we propose an alternate approach to quantifying how well language models learn natural language : we ask how well they match the statistical tendencies of natural language .", "to answer this question , we analyze whether text generated from language models exhibits the statistical tendencies present in the human - generated text on which they were trained .", "we provide a framework \u2013 paired with significance tests \u2013 for evaluating the fit of language models to these trends .", "we find that neural language models appear to learn only a subset of the tendencies considered , but align much more closely with empirical trends than proposed theoretical distributions ( when present ) .", "further , the fit to different distributions is highly - dependent on both model architecture and generation strategy .", "as concrete examples , text generated under the nucleus sampling scheme adheres more closely to the type \u2013 token relationship of natural language than text produced using standard ancestral sampling ; text from lstms reflects the natural language distributions over length , stopwords , and symbols surprisingly well 
."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "alternate approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alternate", "approach"], "offsets": [3, 4]}, {"text": "quantifying", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["quantifying"], "offsets": [6]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "text", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["text"], "offsets": [36]}, {"text": "language models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["language", "models"], "offsets": [39, 40]}], "trigger": {"text": "generated", "tokens": ["generated"], "offsets": [37]}}, {"event_type": "WKS", "arguments": [{"text": "statistical tendencies present", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["statistical", "tendencies", "present"], "offsets": [43, 44, 45]}, {"text": "in the human - generated text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "human", "-", "generated", "text"], "offsets": [46, 47, 48, 49, 50, 51]}], "trigger": {"text": "exhibits", "tokens": ["exhibits"], "offsets": [41]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "framework \u2013 paired with significance tests", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework", "\u2013", "paired", "with", "significance", "tests"], "offsets": [61, 62, 63, 64, 65, 66]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [69]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [59]}}, {"event_type": "PUR", "arguments": [{"text": "fit of language models to these trends", "nugget_type": "TAK", 
"argument_type": "Aim", "tokens": ["fit", "of", "language", "models", "to", "these", "trends"], "offsets": [71, 72, 73, 74, 75, 76, 77]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [79]}, {"text": "learn", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["learn"], "offsets": [87]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [80]}}, {"event_type": "FAC", "arguments": [{"text": "neural language models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["neural", "language", "models"], "offsets": [82, 83, 84]}, {"text": "only", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["only"], "offsets": [88]}, {"text": "subset of the tendencies considered", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["subset", "of", "the", "tendencies", "considered"], "offsets": [90, 91, 92, 93, 94]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [87]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [79]}, {"text": "align", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["align"], "offsets": [97]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [80]}}, {"event_type": "CMP", "arguments": [{"text": "neural language models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "language", "models"], "offsets": [82, 83, 84]}, {"text": "empirical trends", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["empirical", "trends"], "offsets": [102, 103]}, {"text": "proposed theoretical distributions", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["proposed", "theoretical", "distributions"], "offsets": [105, 106, 107]}, {"text": "more closely", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", 
"closely"], "offsets": [99, 100]}, {"text": "much", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["much"], "offsets": [98]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [97]}}, {"event_type": "CMP", "arguments": [{"text": "text", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["text"], "offsets": [136]}, {"text": "generated under the nucleus sampling scheme", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generated", "under", "the", "nucleus", "sampling", "scheme"], "offsets": [137, 138, 139, 140, 141, 142]}, {"text": "more closely", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "closely"], "offsets": [144, 145]}, {"text": "type \u2013 token relationship of natural language", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["type", "\u2013", "token", "relationship", "of", "natural", "language"], "offsets": [148, 149, 150, 151, 152, 153, 154]}, {"text": "text", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["text"], "offsets": [156]}, {"text": "produced using standard ancestral sampling", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["produced", "using", "standard", "ancestral", "sampling"], "offsets": [157, 158, 159, 160, 161]}], "trigger": {"text": "adheres", "tokens": ["adheres"], "offsets": [143]}}, {"event_type": "FAC", "arguments": [{"text": "text", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["text"], "offsets": [163]}, {"text": "from lstms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "lstms"], "offsets": [164, 165]}, {"text": "natural language distributions", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["natural", "language", "distributions"], "offsets": [168, 169, 170]}, {"text": "length", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["length"], "offsets": [172]}, {"text": "stopwords", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["stopwords"], 
"offsets": [174]}, {"text": "symbols", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["symbols"], "offsets": [177]}], "trigger": {"text": "reflects", "tokens": ["reflects"], "offsets": [166]}}], "document": ["we", "propose", "an", "alternate", "approach", "to", "quantifying", "how", "well", "language", "models", "learn", "natural", "language", ":", "we", "ask", "how", "well", "they", "match", "the", "statistical", "tendencies", "of", "natural", "language", ".", "to", "answer", "this", "question", ",", "we", "analyze", "whether", "text", "generated", "from", "language", "models", "exhibits", "the", "statistical", "tendencies", "present", "in", "the", "human", "-", "generated", "text", "on", "which", "they", "were", "trained", ".", "we", "provide", "a", "framework", "\u2013", "paired", "with", "significance", "tests", "\u2013", "for", "evaluating", "the", "fit", "of", "language", "models", "to", "these", "trends", ".", "we", "find", "that", "neural", "language", "models", "appear", "to", "learn", "only", "a", "subset", "of", "the", "tendencies", "considered", ",", "but", "align", "much", "more", "closely", "with", "empirical", "trends", "than", "proposed", "theoretical", "distributions", "(", "when", "present", ")", ".", "further", ",", "the", "fit", "to", "different", "distributions", "is", "highly", "-", "dependent", "on", "both", "model", "architecture", "and", "generation", "strategy", ".", "as", "concrete", "examples", ",", "text", "generated", "under", "the", "nucleus", "sampling", "scheme", "adheres", "more", "closely", "to", "the", "type", "\u2013", "token", "relationship", "of", "natural", "language", "than", "text", "produced", "using", "standard", "ancestral", "sampling", ";", "text", "from", "lstms", "reflects", "the", "natural", "language", "distributions", "over", "length", ",", "stopwords", ",", "and", "symbols", "surprisingly", "well", "."]}, {"venue": "ACL", "title": "Can Explanations Be Useful for Calibrating Black Box Models?", 
"abstract": "NLP practitioners often want to take existing trained models and apply them to data from new domains. While fine-tuning or few-shot learning can be used to adapt a base model, there is no single recipe for making these techniques work; moreover, one may not have access to the original model weights if it is deployed as a black box. We study how to improve a black box model\u2019s performance on a new domain by leveraging explanations of the model\u2019s behavior. Our approach first extracts a set of features combining human intuition about the task with model attributions generated by black box interpretation techniques, then uses a simple calibrator, in the form of a classifier, to predict whether the base model was correct or not. We experiment with our method on two tasks, extractive question answering and natural language inference, covering adaptation from several pairs of domains with limited target-domain data. The experimental results across all the domain pairs show that explanations are useful for calibrating these models, boosting accuracy when predictions do not have to be returned on every example. 
We further show that the calibration model transfers to some extent between tasks.", "doc_id": "de829e737218e840f59698d9f537bf33", "publication_year": 2022, "sentences": ["nlp practitioners often want to take existing trained models and apply them to data from new domains .", "while fine - tuning or few - shot learning can be used to adapt a base model , there is no single recipe for making these techniques work ; moreover , one may not have access to the original model weights if it is deployed as a black box .", "we study how to improve a black box model \u2019 s performance on a new domain by leveraging explanations of the model \u2019 s behavior .", "our approach first extracts a set of features combining human intuition about the task with model attributions generated by black box interpretation techniques , then uses a simple calibrator , in the form of a classifier , to predict whether the base model was correct or not .", "we experiment with our method on two tasks , extractive question answering and natural language inference , covering adaptation from several pairs of domains with limited target - domain data .", "the experimental results across all the domain pairs show that explanations are useful for calibrating these models , boosting accuracy when predictions do not have to be returned on every example .", "we further show that the calibration model transfers to some extent between tasks ."], "events": [{"event_type": "RWF", "arguments": [{"text": "no single recipe", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["no", "single", "recipe"], "offsets": [38, 39, 40]}, {"text": "these techniques work", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["these", "techniques", "work"], "offsets": [43, 44, 45]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [42]}}, {"event_type": "RWF", "arguments": [{"text": "if it is deployed as a black box", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["if", "it", 
"is", "deployed", "as", "a", "black", "box"], "offsets": [59, 60, 61, 62, 63, 64, 65, 66]}, {"text": "original model weights", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["original", "model", "weights"], "offsets": [56, 57, 58]}], "trigger": {"text": "not have access", "tokens": ["not", "have", "access"], "offsets": [51, 52, 53]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [68]}, {"text": "explanations of the model \u2019 s behavior", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["explanations", "of", "the", "model", "\u2019", "s", "behavior"], "offsets": [86, 87, 88, 89, 90, 91, 92]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [72]}], "trigger": {"text": "leveraging", "tokens": ["leveraging"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "black box model \u2019 s performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["black", "box", "model", "\u2019", "s", "performance"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "on a new domain", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "new", "domain"], "offsets": [80, 81, 82, 83]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [72]}}, {"event_type": "MDS", "arguments": [{"text": "set of features", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["set", "of", "features"], "offsets": [99, 100, 101]}, {"text": "human intuition", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["human", "intuition"], "offsets": [103, 104]}], "trigger": {"text": "combining", "tokens": ["combining"], "offsets": [102]}}, {"event_type": "MDS", "arguments": [{"text": "black box interpretation techniques", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["black", "box", "interpretation", "techniques"], "offsets": [113, 114, 
115, 116]}, {"text": "model attributions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["model", "attributions"], "offsets": [109, 110]}], "trigger": {"text": "generated", "tokens": ["generated"], "offsets": [111]}}, {"event_type": "MDS", "arguments": [{"text": "simple calibrator", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["simple", "calibrator"], "offsets": [121, 122]}, {"text": "base model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["base", "model"], "offsets": [135, 136]}, {"text": "correct or not", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["correct", "or", "not"], "offsets": [138, 139, 140]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [132]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [142]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [146]}, {"text": "extractive question answering and natural language inference", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["extractive", "question", "answering", "and", "natural", "language", "inference"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "experiment", "tokens": ["experiment"], "offsets": [143]}}, {"event_type": "FAC", "arguments": [{"text": "explanations", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["explanations"], "offsets": [183]}, {"text": "calibrating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["calibrating"], "offsets": [187]}], "trigger": {"text": "useful", "tokens": ["useful"], "offsets": [185]}}, {"event_type": "PUR", "arguments": [{"text": "these models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["these", "models"], "offsets": [188, 189]}], "trigger": {"text": "calibrating", "tokens": ["calibrating"], "offsets": [187]}}, {"event_type": "FIN", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [205]}, {"text": "transfers", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["transfers"], "offsets": [212]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [207]}}, {"event_type": "FAC", "arguments": [{"text": "calibration model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["calibration", "model"], "offsets": [210, 211]}, {"text": "some extent", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["some", "extent"], "offsets": [214, 215]}], "trigger": {"text": "transfers", "tokens": ["transfers"], "offsets": [212]}}, {"event_type": "FAC", "arguments": [{"text": "explanations", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["explanations"], "offsets": [183]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy"], "offsets": [192]}, {"text": "when predictions do not have to be returned on every example", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "predictions", "do", "not", "have", "to", "be", "returned", "on", "every", "example"], "offsets": [193, 194, 195, 196, 197, 198, 199, 200, 201, 202, 203]}], "trigger": {"text": "boosting", "tokens": ["boosting"], "offsets": [191]}}], "document": ["nlp", "practitioners", "often", "want", "to", "take", "existing", "trained", "models", "and", "apply", "them", "to", "data", "from", "new", "domains", ".", "while", "fine", "-", "tuning", "or", "few", "-", "shot", "learning", "can", "be", "used", "to", "adapt", "a", "base", "model", ",", "there", "is", "no", "single", "recipe", "for", "making", "these", "techniques", "work", ";", "moreover", ",", "one", "may", "not", "have", "access", "to", "the", "original", "model", "weights", "if", "it", "is", "deployed", "as", "a", "black", "box", ".", "we", "study", "how", "to", "improve", "a", "black", "box", "model", "\u2019", "s", "performance", "on", "a", "new", 
"domain", "by", "leveraging", "explanations", "of", "the", "model", "\u2019", "s", "behavior", ".", "our", "approach", "first", "extracts", "a", "set", "of", "features", "combining", "human", "intuition", "about", "the", "task", "with", "model", "attributions", "generated", "by", "black", "box", "interpretation", "techniques", ",", "then", "uses", "a", "simple", "calibrator", ",", "in", "the", "form", "of", "a", "classifier", ",", "to", "predict", "whether", "the", "base", "model", "was", "correct", "or", "not", ".", "we", "experiment", "with", "our", "method", "on", "two", "tasks", ",", "extractive", "question", "answering", "and", "natural", "language", "inference", ",", "covering", "adaptation", "from", "several", "pairs", "of", "domains", "with", "limited", "target", "-", "domain", "data", ".", "the", "experimental", "results", "across", "all", "the", "domain", "pairs", "show", "that", "explanations", "are", "useful", "for", "calibrating", "these", "models", ",", "boosting", "accuracy", "when", "predictions", "do", "not", "have", "to", "be", "returned", "on", "every", "example", ".", "we", "further", "show", "that", "the", "calibration", "model", "transfers", "to", "some", "extent", "between", "tasks", "."]}, {"venue": "ACL", "title": "COSY: COunterfactual SYntax for Cross-Lingual Understanding", "abstract": "Pre-trained multilingual language models, e.g., multilingual-BERT, are widely used in cross-lingual tasks, yielding the state-of-the-art performance. However, such models suffer from a large performance gap between source and target languages, especially in the zero-shot setting, where the models are fine-tuned only on English but tested on other languages for the same task. We tackle this issue by incorporating language-agnostic information, specifically, universal syntax such as dependency relations and POS tags, into language models, based on the observation that universal syntax is transferable across different languages. 
Our approach, called COunterfactual SYntax (COSY), includes the design of SYntax-aware networks as well as a COunterfactual training method to implicitly force the networks to learn not only the semantics but also the syntax. To evaluate COSY, we conduct cross-lingual experiments on natural language inference and question answering using mBERT and XLM-R as network backbones. Our results show that COSY achieves the state-of-the-art performance for both tasks, without using auxiliary training data.", "doc_id": "53f5a11f3cdf89849f80ebb27a281ff4", "publication_year": 2021, "sentences": ["pre - trained multilingual language models , e . g . , multilingual - bert , are widely used in cross - lingual tasks , yielding the state - of - the - art performance .", "however , such models suffer from a large performance gap between source and target languages , especially in the zero - shot setting , where the models are fine - tuned only on english but tested on other languages for the same task .", "we tackle this issue by incorporating language - agnostic information , specifically , universal syntax such as dependency relations and pos tags , into language models , based on the observation that universal syntax is transferable across different languages .", "our approach , called counterfactual syntax ( cosy ) , includes the design of syntax - aware networks as well as a counterfactual training method to implicitly force the networks to learn not only the semantics but also the syntax .", "to evaluate cosy , we conduct cross - lingual experiments on natural language inference and question answering using mbert and xlm - r as network backbones .", "our results show that cosy achieves the state - of - the - art performance for both tasks , without using auxiliary training data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained multilingual language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", 
"multilingual", "language", "models"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "widely used", "tokens": ["widely", "used"], "offsets": [17, 18]}}, {"event_type": "RWF", "arguments": [{"text": "large", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["large"], "offsets": [43]}, {"text": "models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models"], "offsets": [39]}, {"text": "suffer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffer"], "offsets": [40]}, {"text": "performance gap", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["performance", "gap"], "offsets": [44, 45]}, {"text": "between source languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "source", "languages"], "offsets": [46, 47, 50]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [40]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [80]}, {"text": "large performance gap between source languages", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["large", "performance", "gap", "between", "source", "languages"], "offsets": [43, 44, 45, 46, 47, 50]}, {"text": "target languages", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["target", "languages"], "offsets": [49, 50]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [81]}}, {"event_type": "MDS", "arguments": [{"text": "language models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["language", "models"], "offsets": [104, 105]}, {"text": "language - agnostic information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["language", "-", "agnostic", "information"], "offsets": [86, 87, 88, 89]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [85]}}, {"event_type": "PRP", "arguments": [{"text": "counterfactual syntax", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["counterfactual", "syntax"], "offsets": [124, 125]}], "trigger": {"text": "called", "tokens": ["called"], "offsets": [123]}}, {"event_type": "MDS", "arguments": [{"text": "semantics", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["semantics"], "offsets": [155]}, {"text": "syntax", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["syntax"], "offsets": [159]}, {"text": "networks", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["networks"], "offsets": [149]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [151]}}, {"event_type": "WKS", "arguments": [{"text": "evaluate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluate"], "offsets": [162]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "cross - lingual experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["cross", "-", "lingual", "experiments"], "offsets": [167, 168, 169, 170]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [166]}}, {"event_type": "PUR", "arguments": [{"text": "counterfactual syntax", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["counterfactual", "syntax"], "offsets": [124, 125]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [162]}}, {"event_type": "MDS", "arguments": [{"text": "multilingual - bert", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["multilingual", "-", "bert"], "offsets": [12, 13, 14]}, {"text": "xlm - r", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["xlm", "-", "r"], "offsets": [181, 182, 183]}, {"text": "mbert", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["mbert"], "offsets": [179]}, {"text": "natural language inference", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "inference"], "offsets": [172, 173, 174]}, 
{"text": "question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["question", "answering"], "offsets": [176, 177]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [178]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [193]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [190]}}, {"event_type": "FAC", "arguments": [{"text": "without using auxiliary training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "using", "auxiliary", "training", "data"], "offsets": [207, 208, 209, 210, 211]}, {"text": "counterfactual syntax", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["counterfactual", "syntax"], "offsets": [124, 125]}, {"text": "state - of - the - art", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [195, 196, 197, 198, 199, 200, 201]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [202]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [193]}}], "document": ["pre", "-", "trained", "multilingual", "language", "models", ",", "e", ".", "g", ".", ",", "multilingual", "-", "bert", ",", "are", "widely", "used", "in", "cross", "-", "lingual", "tasks", ",", "yielding", "the", "state", "-", "of", "-", "the", "-", "art", "performance", ".", "however", ",", "such", "models", "suffer", "from", "a", "large", "performance", "gap", "between", "source", "and", "target", "languages", ",", "especially", "in", "the", "zero", "-", "shot", "setting", ",", "where", "the", "models", "are", "fine", "-", "tuned", "only", "on", "english", "but", "tested", "on", "other", "languages", "for", "the", "same", "task", ".", "we", "tackle", "this", "issue", "by", "incorporating", "language", "-", "agnostic", "information", ",", "specifically", ",", 
"universal", "syntax", "such", "as", "dependency", "relations", "and", "pos", "tags", ",", "into", "language", "models", ",", "based", "on", "the", "observation", "that", "universal", "syntax", "is", "transferable", "across", "different", "languages", ".", "our", "approach", ",", "called", "counterfactual", "syntax", "(", "cosy", ")", ",", "includes", "the", "design", "of", "syntax", "-", "aware", "networks", "as", "well", "as", "a", "counterfactual", "training", "method", "to", "implicitly", "force", "the", "networks", "to", "learn", "not", "only", "the", "semantics", "but", "also", "the", "syntax", ".", "to", "evaluate", "cosy", ",", "we", "conduct", "cross", "-", "lingual", "experiments", "on", "natural", "language", "inference", "and", "question", "answering", "using", "mbert", "and", "xlm", "-", "r", "as", "network", "backbones", ".", "our", "results", "show", "that", "cosy", "achieves", "the", "state", "-", "of", "-", "the", "-", "art", "performance", "for", "both", "tasks", ",", "without", "using", "auxiliary", "training", "data", "."]}, {"venue": "ACL", "title": "From text to talk: Harnessing conversational corpora for humane and diversity-aware language technology", "abstract": "Informal social interaction is the primordial home of human language. Linguistically diverse conversational corpora are an important and largely untapped resource for computational linguistics and language technology. Through the efforts of a worldwide language documentation movement, such corpora are increasingly becoming available. We show how interactional data from 63 languages (26 families) harbours insights about turn-taking, timing, sequential structure and social action, with implications for language technology, natural language understanding, and the design of conversational interfaces. 
Harnessing linguistically diverse conversational corpora will provide the empirical foundations for flexible, localizable, humane language technologies of the future.", "doc_id": "558d5d456931961f1883202a9190c6e5", "publication_year": 2022, "sentences": ["informal social interaction is the primordial home of human language .", "linguistically diverse conversational corpora are an important and largely untapped resource for computational linguistics and language technology .", "through the efforts of a worldwide language documentation movement , such corpora are increasingly becoming available .", "we show how interactional data from 63 languages ( 26 families ) harbours insights about turn - taking , timing , sequential structure and social action , with implications for language technology , natural language understanding , and the design of conversational interfaces .", "harnessing linguistically diverse conversational corpora will provide the empirical foundations for flexible , localizable , humane language technologies of the future ."], "events": [{"event_type": "ITT", "arguments": [{"text": "linguistically diverse conversational corpora", "nugget_type": "DST", "argument_type": "Target", "tokens": ["linguistically", "diverse", "conversational", "corpora"], "offsets": [11, 12, 13, 14]}], "trigger": {"text": "resource", "tokens": ["resource"], "offsets": [21]}}, {"event_type": "FAC", "arguments": [{"text": "harnessing linguistically diverse conversational corpora", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["harnessing", "linguistically", "diverse", "conversational", "corpora"], "offsets": [90, 91, 92, 93, 94]}, {"text": "empirical foundations", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["empirical", "foundations"], "offsets": [98, 99]}, {"text": "flexible language technologies", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["flexible", "language", "technologies"], "offsets": [101, 106, 107]}, {"text": 
"localizable language technologies", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["localizable", "language", "technologies"], "offsets": [103, 106, 107]}, {"text": "humane language technologies", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["humane", "language", "technologies"], "offsets": [105, 106, 107]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [96]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [46]}, {"text": "insights", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["insights"], "offsets": [59]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [47]}}, {"event_type": "FAC", "arguments": [{"text": "interactional data from 63 languages ( 26 families ) harbours", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["interactional", "data", "from", "63", "languages", "(", "26", "families", ")", "harbours"], "offsets": [49, 50, 51, 52, 53, 54, 55, 56, 57, 58]}, {"text": "turn - taking", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["turn", "-", "taking"], "offsets": [61, 62, 63]}, {"text": "timing", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["timing"], "offsets": [65]}, {"text": "sequential structure", "nugget_type": "APP", "argument_type": "Object", "tokens": ["sequential", "structure"], "offsets": [67, 68]}, {"text": "social action", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["social", "action"], "offsets": [70, 71]}, {"text": "implications for language technology", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["implications", "for", "language", "technology"], "offsets": [74, 75, 76, 77]}, {"text": "design of conversational interfaces", "nugget_type": "APP", "argument_type": "Object", "tokens": ["design", "of", "conversational", "interfaces"], "offsets": [85, 86, 87, 88]}, {"text": "natural language understanding", "nugget_type": 
"TAK", "argument_type": "Object", "tokens": ["natural", "language", "understanding"], "offsets": [79, 80, 81]}], "trigger": {"text": "insights", "tokens": ["insights"], "offsets": [59]}}], "document": ["informal", "social", "interaction", "is", "the", "primordial", "home", "of", "human", "language", ".", "linguistically", "diverse", "conversational", "corpora", "are", "an", "important", "and", "largely", "untapped", "resource", "for", "computational", "linguistics", "and", "language", "technology", ".", "through", "the", "efforts", "of", "a", "worldwide", "language", "documentation", "movement", ",", "such", "corpora", "are", "increasingly", "becoming", "available", ".", "we", "show", "how", "interactional", "data", "from", "63", "languages", "(", "26", "families", ")", "harbours", "insights", "about", "turn", "-", "taking", ",", "timing", ",", "sequential", "structure", "and", "social", "action", ",", "with", "implications", "for", "language", "technology", ",", "natural", "language", "understanding", ",", "and", "the", "design", "of", "conversational", "interfaces", ".", "harnessing", "linguistically", "diverse", "conversational", "corpora", "will", "provide", "the", "empirical", "foundations", "for", "flexible", ",", "localizable", ",", "humane", "language", "technologies", "of", "the", "future", "."]}, {"venue": "ACL", "title": "Show Me More Details: Discovering Hierarchies of Procedures from Semi-structured Web Data", "abstract": "Procedures are inherently hierarchical. To \u201cmake videos\u201d, one may need to \u201cpurchase a camera\u201d, which in turn may require one to \u201cset a budget\u201d. While such hierarchical knowledge is critical for reasoning about complex procedures, most existing work has treated procedures as shallow structures without modeling the parent-child relation. 
In this work, we attempt to construct an open-domain hierarchical knowledge-base (KB) of procedures based on wikiHow, a website containing more than 110k instructional articles, each documenting the steps to carry out a complex procedure. To this end, we develop a simple and efficient method that links steps (e.g., \u201cpurchase a camera\u201d) in an article to other articles with similar goals (e.g., \u201chow to choose a camera\u201d), recursively constructing the KB. Our method significantly outperforms several strong baselines according to automatic evaluation, human judgment, and application to downstream tasks such as instructional video retrieval.", "doc_id": "f9274cea2b014196d821cf322b6d4bbc", "publication_year": 2022, "sentences": ["procedures are inherently hierarchical .", "to \u201c make videos \u201d , one may need to \u201c purchase a camera \u201d , which in turn may require one to \u201c set a budget \u201d .", "while such hierarchical knowledge is critical for reasoning about complex procedures , most existing work has treated procedures as shallow structures without modeling the parent - child relation .", "in this work , we attempt to construct an open - domain hierarchical knowledge - base ( kb ) of procedures based on wikihow , a website containing more than 110k instructional articles , each documenting the steps to carry out a complex procedure .", "to this end , we develop a simple and efficient method that links steps ( e . g . , \u201c purchase a camera \u201d ) in an article to other articles with similar goals ( e . g . 
, \u201c how to choose a camera \u201d ) , recursively constructing the kb .", "our method significantly outperforms several strong baselines according to automatic evaluation , human judgment , and application to downstream tasks such as instructional video retrieval ."], "events": [{"event_type": "RWF", "arguments": [{"text": "existing work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "work"], "offsets": [47, 48]}, {"text": "without modeling the parent - child relation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["without", "modeling", "the", "parent", "-", "child", "relation"], "offsets": [55, 56, 57, 58, 59, 60, 61]}], "trigger": {"text": "treated", "tokens": ["treated"], "offsets": [50]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [67]}, {"text": "open - domain hierarchical knowledge - base ( kb ) of procedures based on wikihow", "nugget_type": "DST", "argument_type": "Content", "tokens": ["open", "-", "domain", "hierarchical", "knowledge", "-", "base", "of", "procedures", "based", "on", "wikihow"], "offsets": [72, 73, 74, 75, 76, 77, 78, 82, 83, 84, 85, 86]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [70]}}, {"event_type": "MDS", "arguments": [{"text": "open - domain hierarchical knowledge - base ( kb ) of procedures", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["open", "-", "domain", "hierarchical", "knowledge", "-", "base", "of", "procedures"], "offsets": [72, 73, 74, 75, 76, 77, 78, 82, 83]}, {"text": "more than 110k instructional articles", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["more", "than", "110k", "instructional", "articles"], "offsets": [91, 92, 93, 94, 95]}, {"text": "documenting the steps to carry out a complex procedure", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["documenting", "the", "steps", "to", "carry", "out", 
"a", "complex", "procedure"], "offsets": [98, 99, 100, 101, 102, 103, 104, 105, 106]}], "trigger": {"text": "containing", "tokens": ["containing"], "offsets": [90]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [112]}, {"text": "simple and efficient method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "and", "efficient", "method"], "offsets": [115, 116, 117, 118]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [113]}}, {"event_type": "MDS", "arguments": [{"text": "steps", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["steps"], "offsets": [121]}, {"text": "in an article", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "article"], "offsets": [134, 135, 136]}, {"text": "other articles", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["other", "articles"], "offsets": [138, 139]}, {"text": "with similar goals", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "similar", "goals"], "offsets": [140, 141, 142]}, {"text": "recursively constructing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["recursively", "constructing"], "offsets": [158, 159]}], "trigger": {"text": "links", "tokens": ["links"], "offsets": [120]}}, {"event_type": "PUR", "arguments": [{"text": "kb", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["knowledge", "-", "base"], "offsets": [76, 77, 78]}], "trigger": {"text": "recursively constructing", "tokens": ["recursively", "constructing"], "offsets": [158, 159]}}, {"event_type": "CMP", "arguments": [{"text": "simple and efficient method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["simple", "and", "efficient", "method"], "offsets": [115, 116, 117, 118]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [166]}, {"text": "several strong 
baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["several", "strong", "baselines"], "offsets": [167, 168, 169]}, {"text": "automatic evaluation", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["automatic", "evaluation"], "offsets": [172, 173]}, {"text": "human judgment", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["human", "judgment"], "offsets": [175, 176]}, {"text": "application to downstream tasks", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["application", "to", "downstream", "tasks"], "offsets": [179, 180, 181, 182]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [166]}}], "document": ["procedures", "are", "inherently", "hierarchical", ".", "to", "\u201c", "make", "videos", "\u201d", ",", "one", "may", "need", "to", "\u201c", "purchase", "a", "camera", "\u201d", ",", "which", "in", "turn", "may", "require", "one", "to", "\u201c", "set", "a", "budget", "\u201d", ".", "while", "such", "hierarchical", "knowledge", "is", "critical", "for", "reasoning", "about", "complex", "procedures", ",", "most", "existing", "work", "has", "treated", "procedures", "as", "shallow", "structures", "without", "modeling", "the", "parent", "-", "child", "relation", ".", "in", "this", "work", ",", "we", "attempt", "to", "construct", "an", "open", "-", "domain", "hierarchical", "knowledge", "-", "base", "(", "kb", ")", "of", "procedures", "based", "on", "wikihow", ",", "a", "website", "containing", "more", "than", "110k", "instructional", "articles", ",", "each", "documenting", "the", "steps", "to", "carry", "out", "a", "complex", "procedure", ".", "to", "this", "end", ",", "we", "develop", "a", "simple", "and", "efficient", "method", "that", "links", "steps", "(", "e", ".", "g", ".", ",", "\u201c", "purchase", "a", "camera", "\u201d", ")", "in", "an", "article", "to", "other", "articles", "with", "similar", "goals", "(", "e", ".", "g", ".", ",", "\u201c", "how", "to", "choose", "a", 
"camera", "\u201d", ")", ",", "recursively", "constructing", "the", "kb", ".", "our", "method", "significantly", "outperforms", "several", "strong", "baselines", "according", "to", "automatic", "evaluation", ",", "human", "judgment", ",", "and", "application", "to", "downstream", "tasks", "such", "as", "instructional", "video", "retrieval", "."]}, {"venue": "ACL", "title": "Unsupervised Cross-Domain Prerequisite Chain Learning using Variational Graph Autoencoders", "abstract": "Learning prerequisite chains is an important task for one to pick up knowledge efficiently in both known and unknown domains. For example, one may be an expert in the natural language processing (NLP) domain, but want to determine the best order in which to learn new concepts in an unfamiliar Computer Vision domain (CV). Both domains share some common concepts, such as machine learning basics and deep learning models. In this paper, we solve the task of unsupervised cross-domain concept prerequisite chain learning, using an optimized variational graph autoencoder. Our model learns to transfer concept prerequisite relations from an information-rich domain (source domain) to an information-poor domain (target domain), substantially surpassing other baseline models. In addition, we expand an existing dataset by introducing two new domains\u2014-CV and Bioinformatics (BIO). 
The annotated data and resources as well as the code will be made publicly available.", "doc_id": "e18794390902122f591156ab72e7f6df", "publication_year": 2021, "sentences": ["learning prerequisite chains is an important task for one to pick up knowledge efficiently in both known and unknown domains .", "for example , one may be an expert in the natural language processing ( nlp ) domain , but want to determine the best order in which to learn new concepts in an unfamiliar computer vision domain ( cv ) .", "both domains share some common concepts , such as machine learning basics and deep learning models .", "in this paper , we solve the task of unsupervised cross - domain concept prerequisite chain learning , using an optimized variational graph autoencoder .", "our model learns to transfer concept prerequisite relations from an information - rich domain ( source domain ) to an information - poor domain ( target domain ) , substantially surpassing other baseline models .", "in addition , we expand an existing dataset by introducing two new domains \u2014 - cv and bioinformatics ( bio ) .", "the annotated data and resources as well as the code will be made publicly available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "learning prerequisite chains", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["learning", "prerequisite", "chains"], "offsets": [0, 1, 2]}], "trigger": {"text": "important task", "tokens": ["important", "task"], "offsets": [5, 6]}}, {"event_type": "MDS", "arguments": [{"text": "optimized variational graph autoencoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["optimized", "variational", "graph", "autoencoder"], "offsets": [99, 100, 101, 102]}, {"text": "solve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["solve"], "offsets": [84]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [97]}}, {"event_type": "PUR", "arguments": [{"text": "task of unsupervised cross - 
domain concept prerequisite chain learning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["task", "of", "unsupervised", "cross", "-", "domain", "concept", "prerequisite", "chain", "learning"], "offsets": [86, 87, 88, 89, 90, 91, 92, 93, 94, 95]}], "trigger": {"text": "solve", "tokens": ["solve"], "offsets": [84]}}, {"event_type": "CMP", "arguments": [{"text": "substantially surpassing", "nugget_type": "STR", "argument_type": "Result", "tokens": ["substantially", "surpassing"], "offsets": [133, 134]}, {"text": "other baseline models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["other", "baseline", "models"], "offsets": [135, 136, 137]}], "trigger": {"text": "substantially surpassing", "tokens": ["substantially", "surpassing"], "offsets": [133, 134]}}, {"event_type": "MDS", "arguments": [{"text": "concept prerequisite relations from an information - rich domain", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["concept", "prerequisite", "relations", "from", "an", "information", "-", "rich", "domain"], "offsets": [109, 110, 111, 112, 113, 114, 115, 116, 117]}, {"text": "information - poor domain", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["information", "-", "poor", "domain"], "offsets": [124, 125, 126, 127]}], "trigger": {"text": "learns to transfer", "tokens": ["learns", "to", "transfer"], "offsets": [106, 107, 108]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [142]}, {"text": "cv", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["cv"], "offsets": [154]}, {"text": "bioinformatics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["bioinformatics"], "offsets": [156]}, {"text": "expand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["expand"], "offsets": [143]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [148]}}, {"event_type": "PUR", "arguments": 
[{"text": "existing dataset", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["existing", "dataset"], "offsets": [145, 146]}], "trigger": {"text": "expand", "tokens": ["expand"], "offsets": [143]}}], "document": ["learning", "prerequisite", "chains", "is", "an", "important", "task", "for", "one", "to", "pick", "up", "knowledge", "efficiently", "in", "both", "known", "and", "unknown", "domains", ".", "for", "example", ",", "one", "may", "be", "an", "expert", "in", "the", "natural", "language", "processing", "(", "nlp", ")", "domain", ",", "but", "want", "to", "determine", "the", "best", "order", "in", "which", "to", "learn", "new", "concepts", "in", "an", "unfamiliar", "computer", "vision", "domain", "(", "cv", ")", ".", "both", "domains", "share", "some", "common", "concepts", ",", "such", "as", "machine", "learning", "basics", "and", "deep", "learning", "models", ".", "in", "this", "paper", ",", "we", "solve", "the", "task", "of", "unsupervised", "cross", "-", "domain", "concept", "prerequisite", "chain", "learning", ",", "using", "an", "optimized", "variational", "graph", "autoencoder", ".", "our", "model", "learns", "to", "transfer", "concept", "prerequisite", "relations", "from", "an", "information", "-", "rich", "domain", "(", "source", "domain", ")", "to", "an", "information", "-", "poor", "domain", "(", "target", "domain", ")", ",", "substantially", "surpassing", "other", "baseline", "models", ".", "in", "addition", ",", "we", "expand", "an", "existing", "dataset", "by", "introducing", "two", "new", "domains", "\u2014", "-", "cv", "and", "bioinformatics", "(", "bio", ")", ".", "the", "annotated", "data", "and", "resources", "as", "well", "as", "the", "code", "will", "be", "made", "publicly", "available", "."]}, {"venue": "ACL", "title": "Heterogeneous Graph Transformer for Graph-to-Sequence Learning", "abstract": "The graph-to-sequence (Graph2Seq) learning aims to transduce graph-structured representations to word sequences for text generation. 
Recent studies propose various models to encode graph structure. However, most previous works ignore the indirect relations between distance nodes, or treat indirect relations and direct relations in the same way. In this paper, we propose the Heterogeneous Graph Transformer to independently model the different relations in the individual subgraphs of the original graph, including direct relations, indirect relations and multiple possible relations between nodes. Experimental results show that our model strongly outperforms the state of the art on all four standard benchmarks of AMR-to-text generation and syntax-based neural machine translation.", "doc_id": "3dee8bd18d3d724e28ee786e4b6e9313", "publication_year": 2020, "sentences": ["the graph - to - sequence ( graph2seq ) learning aims to transduce graph - structured representations to word sequences for text generation .", "recent studies propose various models to encode graph structure .", "however , most previous works ignore the indirect relations between distance nodes , or treat indirect relations and direct relations in the same way .", "in this paper , we propose the heterogeneous graph transformer to independently model the different relations in the individual subgraphs of the original graph , including direct relations , indirect relations and multiple possible relations between nodes .", "experimental results show that our model strongly outperforms the state of the art on all four standard benchmarks of amr - to - text generation and syntax - based neural machine translation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "graph - to - sequence ( graph2seq ) learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["graph", "-", "to", "-", "sequence", "(", "graph2seq", ")", "learning"], "offsets": [1, 2, 3, 4, 5, 6, 7, 8, 9]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [10]}}, {"event_type": "RWS", "arguments": [{"text": "various models", "nugget_type": "APP", 
"argument_type": "Subject", "tokens": ["various", "models"], "offsets": [27, 28]}, {"text": "encode", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encode"], "offsets": [30]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [26]}}, {"event_type": "PUR", "arguments": [{"text": "graph structure", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["graph", "structure"], "offsets": [31, 32]}], "trigger": {"text": "encode", "tokens": ["encode"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "most previous works", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "previous", "works"], "offsets": [36, 37, 38]}, {"text": "indirect relations between distance nodes", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["indirect", "relations", "between", "distance", "nodes"], "offsets": [41, 42, 43, 44, 45]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [39]}}, {"event_type": "RWF", "arguments": [{"text": "indirect relations", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["indirect", "relations"], "offsets": [49, 50]}, {"text": "direct relations", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["direct", "relations"], "offsets": [52, 53]}, {"text": "in the same way", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "same", "way"], "offsets": [54, 55, 56, 57]}], "trigger": {"text": "treat", "tokens": ["treat"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "independently model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["independently", "model"], "offsets": [70, 71]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "heterogeneous graph transformer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["heterogeneous", "graph", "transformer"], "offsets": [66, 67, 68]}], "trigger": {"text": "propose", "tokens": 
["propose"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": "direct relations between nodes", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["direct", "relations", "between", "nodes"], "offsets": [85, 86, 94, 95]}, {"text": "indirect relations between nodes", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["indirect", "relations", "between", "nodes"], "offsets": [88, 89, 94, 95]}, {"text": "multiple possible relations between nodes", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["multiple", "possible", "relations", "between", "nodes"], "offsets": [91, 92, 93, 94, 95]}], "trigger": {"text": "independently model", "tokens": ["independently", "model"], "offsets": [70, 71]}}, {"event_type": "FIN", "arguments": [{"text": "strongly outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["strongly", "outperforms"], "offsets": [103, 104]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [99]}}, {"event_type": "CMP", "arguments": [{"text": "heterogeneous graph transformer", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["heterogeneous", "graph", "transformer"], "offsets": [66, 67, 68]}, {"text": "strongly outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["strongly", "outperforms"], "offsets": [103, 104]}, {"text": "state of the art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "of", "the", "art"], "offsets": [106, 107, 108, 109]}], "trigger": {"text": "strongly outperforms", "tokens": ["strongly", "outperforms"], "offsets": [103, 104]}}], "document": ["the", "graph", "-", "to", "-", "sequence", "(", "graph2seq", ")", "learning", "aims", "to", "transduce", "graph", "-", "structured", "representations", "to", "word", "sequences", "for", "text", "generation", ".", "recent", "studies", "propose", "various", "models", "to", "encode", "graph", "structure", ".", "however", ",", "most", "previous", "works", "ignore", "the", "indirect", 
"relations", "between", "distance", "nodes", ",", "or", "treat", "indirect", "relations", "and", "direct", "relations", "in", "the", "same", "way", ".", "in", "this", "paper", ",", "we", "propose", "the", "heterogeneous", "graph", "transformer", "to", "independently", "model", "the", "different", "relations", "in", "the", "individual", "subgraphs", "of", "the", "original", "graph", ",", "including", "direct", "relations", ",", "indirect", "relations", "and", "multiple", "possible", "relations", "between", "nodes", ".", "experimental", "results", "show", "that", "our", "model", "strongly", "outperforms", "the", "state", "of", "the", "art", "on", "all", "four", "standard", "benchmarks", "of", "amr", "-", "to", "-", "text", "generation", "and", "syntax", "-", "based", "neural", "machine", "translation", "."]}, {"venue": "ACL", "title": "What are the Goals of Distributional Semantics?", "abstract": "Distributional semantic models have become a mainstay in NLP, providing useful features for downstream tasks. However, assessing long-term progress requires explicit long-term goals. In this paper, I take a broad linguistic perspective, looking at how well current models can deal with various semantic challenges. Given stark differences between models proposed in different subfields, a broad perspective is needed to see how we could integrate them. 
I conclude that, while linguistic insights can guide the design of model architectures, future progress will require balancing the often conflicting demands of linguistic expressiveness and computational tractability.", "doc_id": "ce81ffe77c8bd8e218a633a1562936b3", "publication_year": 2020, "sentences": ["distributional semantic models have become a mainstay in nlp , providing useful features for downstream tasks .", "however , assessing long - term progress requires explicit long - term goals .", "in this paper , i take a broad linguistic perspective , looking at how well current models can deal with various semantic challenges .", "given stark differences between models proposed in different subfields , a broad perspective is needed to see how we could integrate them .", "i conclude that , while linguistic insights can guide the design of model architectures , future progress will require balancing the often conflicting demands of linguistic expressiveness and computational tractability ."], "events": [{"event_type": "ITT", "arguments": [{"text": "distributional semantic models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["distributional", "semantic", "models"], "offsets": [0, 1, 2]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "take a broad linguistic perspective", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["take", "a", "broad", "linguistic", "perspective"], "offsets": [36, 37, 38, 39, 40]}, {"text": "i", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["i"], "offsets": [35]}, {"text": "current models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["current", "models"], "offsets": [46, 47]}], "trigger": {"text": "looking", "tokens": ["looking"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "given stark differences between models proposed in different subfields", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["given", "stark", "differences", "between", "models", "proposed", "in", "different", "subfields"], "offsets": [55, 56, 57, 58, 59, 60, 61, 62, 63]}, {"text": "broad perspective", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["broad", "perspective"], "offsets": [66, 67]}, {"text": "see", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["see"], "offsets": [71]}], "trigger": {"text": "needed", "tokens": ["needed"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "how we could integrate them", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["how", "we", "could", "integrate", "them"], "offsets": [72, 73, 74, 75, 76]}], "trigger": {"text": "see", "tokens": ["see"], "offsets": [71]}}, {"event_type": "FAC", "arguments": [{"text": "linguistic insights", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["linguistic", "insights"], "offsets": [83, 84]}, {"text": "design of model architectures", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["design", "of", "model", "architectures"], "offsets": [88, 89, 90, 91]}], "trigger": {"text": "guide", "tokens": ["guide"], "offsets": [86]}}], "document": ["distributional", "semantic", "models", "have", "become", "a", "mainstay", "in", "nlp", ",", "providing", "useful", "features", "for", "downstream", "tasks", ".", "however", ",", "assessing", "long", "-", "term", "progress", "requires", "explicit", "long", "-", "term", "goals", ".", "in", "this", "paper", ",", "i", "take", "a", "broad", "linguistic", "perspective", ",", "looking", "at", "how", "well", "current", "models", "can", "deal", "with", "various", "semantic", "challenges", ".", "given", "stark", "differences", "between", "models", "proposed", "in", "different", "subfields", ",", "a", "broad", "perspective", "is", "needed", "to", "see", "how", "we", "could", "integrate", "them", ".", "i", "conclude", "that", ",", "while", "linguistic", "insights", "can", "guide", "the", "design", 
"of", "model", "architectures", ",", "future", "progress", "will", "require", "balancing", "the", "often", "conflicting", "demands", "of", "linguistic", "expressiveness", "and", "computational", "tractability", "."]}, {"venue": "ACL", "title": "A Multi-Document Coverage Reward for RELAXed Multi-Document Summarization", "abstract": "Multi-document summarization (MDS) has made significant progress in recent years, in part facilitated by the availability of new, dedicated datasets and capacious language models. However, a standing limitation of these models is that they are trained against limited references and with plain maximum-likelihood objectives. As for many other generative tasks, reinforcement learning (RL) offers the potential to improve the training of MDS models; yet, it requires a carefully-designed reward that can ensure appropriate leverage of both the reference summaries and the input documents. For this reason, in this paper we propose fine-tuning an MDS baseline with a reward that balances a reference-based metric such as ROUGE with coverage of the input documents. To implement the approach, we utilize RELAX (Grathwohl et al., 2018), a contemporary gradient estimator which is both low-variance and unbiased, and we fine-tune the baseline in a few-shot style for both stability and computational efficiency. Experimental results over the Multi-News and WCEP MDS datasets show significant improvements of up to +0.95 pp average ROUGE score and +3.17 pp METEOR score over the baseline, and competitive results with the literature. 
In addition, they show that the coverage of the input documents is increased, and evenly across all documents.", "doc_id": "b8d320f05ca14d9bf35ac2be5752202c", "publication_year": 2022, "sentences": ["multi - document summarization ( mds ) has made significant progress in recent years , in part facilitated by the availability of new , dedicated datasets and capacious language models .", "however , a standing limitation of these models is that they are trained against limited references and with plain maximum - likelihood objectives .", "as for many other generative tasks , reinforcement learning ( rl ) offers the potential to improve the training of mds models ; yet , it requires a carefully - designed reward that can ensure appropriate leverage of both the reference summaries and the input documents .", "for this reason , in this paper we propose fine - tuning an mds baseline with a reward that balances a reference - based metric such as rouge with coverage of the input documents .", "to implement the approach , we utilize relax ( grathwohl et al . , 2018 ) , a contemporary gradient estimator which is both low - variance and unbiased , and we fine - tune the baseline in a few - shot style for both stability and computational efficiency .", "experimental results over the multi - news and wcep mds datasets show significant improvements of up to + 0 . 95 pp average rouge score and + 3 . 
17 pp meteor score over the baseline , and competitive results with the literature .", "in addition , they show that the coverage of the input documents is increased , and evenly across all documents ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multi - document summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multi", "-", "document", "summarization"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "made", "tokens": ["made"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "capacious language models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["capacious", "language", "models"], "offsets": [27, 28, 29]}, {"text": "standing limitation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["standing", "limitation"], "offsets": [34, 35]}, {"text": "limited references", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited", "references"], "offsets": [45, 46]}, {"text": "with plain maximum - likelihood objectives", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "plain", "maximum", "-", "likelihood", "objectives"], "offsets": [48, 49, 50, 51, 52, 53]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}, {"text": "fine - tuning an mds baseline with a reward", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fine", "-", "tuning", "an", "multi", "-", "document", "summarization", "baseline", "with", "a", "reward"], "offsets": [111, 112, 113, 114, 0, 1, 2, 3, 116, 117, 118, 119]}, {"text": "balances", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["balances"], "offsets": [121]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [110]}}, {"event_type": "PUR", "arguments": [{"text": "reference - based metric", "nugget_type": "APP", "argument_type": "Aim", 
"tokens": ["reference", "-", "based", "metric"], "offsets": [123, 124, 125, 126]}], "trigger": {"text": "balances", "tokens": ["balances"], "offsets": [121]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [142]}, {"text": "relax", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["relax"], "offsets": [144]}, {"text": "computational efficiency", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["computational", "efficiency"], "offsets": [184, 185]}, {"text": "fine - tune", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fine", "-", "tune"], "offsets": [169, 170, 171]}, {"text": "stability", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["stability"], "offsets": [182]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [143]}}, {"event_type": "PUR", "arguments": [{"text": "baseline", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["fine", "-", "tuning", "an", "multi", "-", "document", "summarization", "baseline"], "offsets": [111, 112, 113, 114, 0, 1, 2, 3, 116]}, {"text": "in a few - shot style", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "few", "-", "shot", "style"], "offsets": [174, 175, 176, 177, 178, 179]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [169, 170, 171]}}, {"event_type": "CMP", "arguments": [{"text": "multi - news", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multi", "-", "news"], "offsets": [191, 192, 193]}, {"text": "wcep mds datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wcep", "mds", "datasets"], "offsets": [195, 196, 197]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [199]}, {"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [200]}, {"text": "+ 0 . 
95", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["+", "0", ".", "95"], "offsets": [204, 205, 206, 207]}, {"text": "pp average rouge score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["pp", "average", "rouge", "score"], "offsets": [208, 209, 210, 211]}, {"text": "baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline"], "offsets": [222]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [198]}}, {"event_type": "CMP", "arguments": [{"text": "multi - news", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multi", "-", "news"], "offsets": [191, 192, 193]}, {"text": "wcep mds datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wcep", "mds", "datasets"], "offsets": [195, 196, 197]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [199]}, {"text": "improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvements"], "offsets": [200]}, {"text": "+ 3 . 
17", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["+", "3", ".", "17"], "offsets": [213, 214, 215, 216]}, {"text": "pp meteor score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["pp", "meteor", "score"], "offsets": [217, 218, 219]}, {"text": "baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baseline"], "offsets": [222]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [198]}}, {"event_type": "FAC", "arguments": [{"text": "multi - news", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multi", "-", "news"], "offsets": [191, 192, 193]}, {"text": "wcep mds datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wcep", "mds", "datasets"], "offsets": [195, 196, 197]}, {"text": "competitive results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["competitive", "results"], "offsets": [225, 226]}, {"text": "with the literature", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "literature"], "offsets": [227, 228, 229]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [198]}}, {"event_type": "FIN", "arguments": [{"text": "increased", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["increased"], "offsets": [244]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [235]}}, {"event_type": "FAC", "arguments": [{"text": "coverage of the input documents", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["coverage", "of", "the", "input", "documents"], "offsets": [238, 239, 240, 241, 242]}, {"text": "evenly across all documents", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["evenly", "across", "all", "documents"], "offsets": [247, 248, 249, 250]}], "trigger": {"text": "increased", "tokens": ["increased"], "offsets": [244]}}], "document": ["multi", "-", "document", "summarization", "(", "mds", ")", "has", "made", "significant", "progress", "in", "recent", "years", ",", "in", 
"part", "facilitated", "by", "the", "availability", "of", "new", ",", "dedicated", "datasets", "and", "capacious", "language", "models", ".", "however", ",", "a", "standing", "limitation", "of", "these", "models", "is", "that", "they", "are", "trained", "against", "limited", "references", "and", "with", "plain", "maximum", "-", "likelihood", "objectives", ".", "as", "for", "many", "other", "generative", "tasks", ",", "reinforcement", "learning", "(", "rl", ")", "offers", "the", "potential", "to", "improve", "the", "training", "of", "mds", "models", ";", "yet", ",", "it", "requires", "a", "carefully", "-", "designed", "reward", "that", "can", "ensure", "appropriate", "leverage", "of", "both", "the", "reference", "summaries", "and", "the", "input", "documents", ".", "for", "this", "reason", ",", "in", "this", "paper", "we", "propose", "fine", "-", "tuning", "an", "mds", "baseline", "with", "a", "reward", "that", "balances", "a", "reference", "-", "based", "metric", "such", "as", "rouge", "with", "coverage", "of", "the", "input", "documents", ".", "to", "implement", "the", "approach", ",", "we", "utilize", "relax", "(", "grathwohl", "et", "al", ".", ",", "2018", ")", ",", "a", "contemporary", "gradient", "estimator", "which", "is", "both", "low", "-", "variance", "and", "unbiased", ",", "and", "we", "fine", "-", "tune", "the", "baseline", "in", "a", "few", "-", "shot", "style", "for", "both", "stability", "and", "computational", "efficiency", ".", "experimental", "results", "over", "the", "multi", "-", "news", "and", "wcep", "mds", "datasets", "show", "significant", "improvements", "of", "up", "to", "+", "0", ".", "95", "pp", "average", "rouge", "score", "and", "+", "3", ".", "17", "pp", "meteor", "score", "over", "the", "baseline", ",", "and", "competitive", "results", "with", "the", "literature", ".", "in", "addition", ",", "they", "show", "that", "the", "coverage", "of", "the", "input", "documents", "is", "increased", ",", "and", "evenly", "across", "all", 
"documents", "."]}, {"venue": "ACL", "title": "Toxicity Detection: Does Context Really Matter?", "abstract": "Moderation is crucial to promoting healthy online discussions. Although several \u2018toxicity\u2019 detection datasets and models have been published, most of them ignore the context of the posts, implicitly assuming that comments may be judged independently. We investigate this assumption by focusing on two questions: (a) does context affect the human judgement, and (b) does conditioning on context improve performance of toxicity detection systems? We experiment with Wikipedia conversations, limiting the notion of context to the previous post in the thread and the discussion title. We find that context can both amplify or mitigate the perceived toxicity of posts. Moreover, a small but significant subset of manually labeled posts (5% in one of our experiments) end up having the opposite toxicity labels if the annotators are not provided with context. Surprisingly, we also find no evidence that context actually improves the performance of toxicity classifiers, having tried a range of classifiers and mechanisms to make them context aware. This points to the need for larger datasets of comments annotated in context. 
We make our code and data publicly available.", "doc_id": "a5a921387ba6c34b070d8c55367c0cbe", "publication_year": 2020, "sentences": ["moderation is crucial to promoting healthy online discussions .", "although several \u2018 toxicity \u2019 detection datasets and models have been published , most of them ignore the context of the posts , implicitly assuming that comments may be judged independently .", "we investigate this assumption by focusing on two questions : ( a ) does context affect the human judgement , and ( b ) does conditioning on context improve performance of toxicity detection systems ?", "we experiment with wikipedia conversations , limiting the notion of context to the previous post in the thread and the discussion title .", "we find that context can both amplify or mitigate the perceived toxicity of posts .", "moreover , a small but significant subset of manually labeled posts ( 5 % in one of our experiments ) end up having the opposite toxicity labels if the annotators are not provided with context .", "surprisingly , we also find no evidence that context actually improves the performance of toxicity classifiers , having tried a range of classifiers and mechanisms to make them context aware .", "this points to the need for larger datasets of comments annotated in context .", "we make our code and data publicly available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "healthy online discussions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["healthy", "online", "discussions"], "offsets": [5, 6, 7]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [2]}}, {"event_type": "RWF", "arguments": [{"text": "ignore", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignore"], "offsets": [25]}, {"text": "most of them", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "of", "several", "\u2018", "toxicity", "\u2019", "detection", "datasets", "and", "models"], "offsets": [22, 23, 10, 
11, 12, 13, 14, 15, 16, 17]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [25]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [41]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [42]}, {"text": "context affect the human judgement", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["context", "affect", "the", "human", "judgement"], "offsets": [55, 56, 57, 58, 59]}, {"text": "conditioning on context improve performance of toxicity detection systems", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["conditioning", "on", "context", "improve", "performance", "of", "toxicity", "detection", "systems"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73, 74]}], "trigger": {"text": "focusing on", "tokens": ["focusing", "on"], "offsets": [46, 47]}}, {"event_type": "PUR", "arguments": [{"text": "this assumption", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["comments", "may", "be", "judged", "independently"], "offsets": [35, 36, 37, 38, 39]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "wikipedia conversations", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wikipedia", "conversations"], "offsets": [79, 80]}], "trigger": {"text": "experiment", "tokens": ["experiment"], "offsets": [77]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [99]}, {"text": "amplify or mitigate", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["amplify", "or", "mitigate"], "offsets": [105, 106, 107]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [100]}}, {"event_type": "FAC", "arguments": 
[{"text": "context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["context"], "offsets": [102]}, {"text": "perceived toxicity of posts", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["perceived", "toxicity", "of", "posts"], "offsets": [109, 110, 111, 112]}], "trigger": {"text": "amplify or mitigate", "tokens": ["amplify", "or", "mitigate"], "offsets": [105, 106, 107]}}, {"event_type": "FAC", "arguments": [{"text": "if the annotators are not provided with context", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["if", "the", "annotators", "are", "not", "provided", "with", "context"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148]}, {"text": "opposite toxicity labels", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["opposite", "toxicity", "labels"], "offsets": [138, 139, 140]}, {"text": "subset of manually labeled posts", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["subset", "of", "manually", "labeled", "posts"], "offsets": [120, 121, 122, 123, 124]}], "trigger": {"text": "having", "tokens": ["having"], "offsets": [136]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [152]}, {"text": "classifiers", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["classifiers"], "offsets": [172]}, {"text": "mechanisms", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["mechanisms"], "offsets": [174]}, {"text": "context aware", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["context", "aware"], "offsets": [178, 179]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [176]}}, {"event_type": "MDS", "arguments": [{"text": "notion of context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["notion", "of", "context"], "offsets": [84, 85, 86]}, {"text": "discussion title", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["discussion", 
"title"], "offsets": [96, 97]}, {"text": "previous post", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["previous", "post"], "offsets": [89, 90]}], "trigger": {"text": "limiting", "tokens": ["limiting"], "offsets": [82]}}, {"event_type": "FAC", "arguments": [{"text": "no evidence that context actually improves the performance of toxicity classifiers", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["no", "evidence", "that", "context", "actually", "improves", "the", "performance", "of", "toxicity", "classifiers"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [154]}}, {"event_type": "FAC", "arguments": [{"text": "context", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["context"], "offsets": [193]}, {"text": "larger datasets of comments annotated", "nugget_type": "DST", "argument_type": "Object", "tokens": ["larger", "datasets", "of", "comments", "annotated"], "offsets": [187, 188, 189, 190, 191]}], "trigger": {"text": "need", "tokens": ["need"], "offsets": [185]}}], "document": ["moderation", "is", "crucial", "to", "promoting", "healthy", "online", "discussions", ".", "although", "several", "\u2018", "toxicity", "\u2019", "detection", "datasets", "and", "models", "have", "been", "published", ",", "most", "of", "them", "ignore", "the", "context", "of", "the", "posts", ",", "implicitly", "assuming", "that", "comments", "may", "be", "judged", "independently", ".", "we", "investigate", "this", "assumption", "by", "focusing", "on", "two", "questions", ":", "(", "a", ")", "does", "context", "affect", "the", "human", "judgement", ",", "and", "(", "b", ")", "does", "conditioning", "on", "context", "improve", "performance", "of", "toxicity", "detection", "systems", "?", "we", "experiment", "with", "wikipedia", "conversations", ",", "limiting", "the", "notion", "of", "context", "to", "the", "previous", "post", "in", "the", "thread", "and", 
"the", "discussion", "title", ".", "we", "find", "that", "context", "can", "both", "amplify", "or", "mitigate", "the", "perceived", "toxicity", "of", "posts", ".", "moreover", ",", "a", "small", "but", "significant", "subset", "of", "manually", "labeled", "posts", "(", "5", "%", "in", "one", "of", "our", "experiments", ")", "end", "up", "having", "the", "opposite", "toxicity", "labels", "if", "the", "annotators", "are", "not", "provided", "with", "context", ".", "surprisingly", ",", "we", "also", "find", "no", "evidence", "that", "context", "actually", "improves", "the", "performance", "of", "toxicity", "classifiers", ",", "having", "tried", "a", "range", "of", "classifiers", "and", "mechanisms", "to", "make", "them", "context", "aware", ".", "this", "points", "to", "the", "need", "for", "larger", "datasets", "of", "comments", "annotated", "in", "context", ".", "we", "make", "our", "code", "and", "data", "publicly", "available", "."]}, {"venue": "ACL", "title": "Learning Adaptive Segmentation Policy for End-to-End Simultaneous Translation", "abstract": "End-to-end simultaneous speech-to-text translation aims to directly perform translation from streaming source speech to target text with high translation quality and low latency. A typical simultaneous translation (ST) system consists of a speech translation model and a policy module, which determines when to wait and when to translate. Thus the policy is crucial to balance translation quality and latency. Conventional methods usually adopt fixed policies, e.g. segmenting the source speech with a fixed length and generating translation. However, this method ignores contextual information and suffers from low translation quality. This paper proposes an adaptive segmentation policy for end-to-end ST. 
Inspired by human interpreters, the policy learns to segment the source streaming speech into meaningful units by considering both acoustic features and translation history, maintaining consistency between the segmentation and translation. Experimental results on English-German and Chinese-English show that our method achieves a good accuracy-latency trade-off over recently proposed state-of-the-art methods.", "doc_id": "905fe19b8cfa002332e36c53c3e5b23e", "publication_year": 2022, "sentences": ["end - to - end simultaneous speech - to - text translation aims to directly perform translation from streaming source speech to target text with high translation quality and low latency .", "a typical simultaneous translation ( st ) system consists of a speech translation model and a policy module , which determines when to wait and when to translate .", "thus the policy is crucial to balance translation quality and latency .", "conventional methods usually adopt fixed policies , e . g . segmenting the source speech with a fixed length and generating translation .", "however , this method ignores contextual information and suffers from low translation quality .", "this paper proposes an adaptive segmentation policy for end - to - end st .", "inspired by human interpreters , the policy learns to segment the source streaming speech into meaningful units by considering both acoustic features and translation history , maintaining consistency between the segmentation and translation .", "experimental results on english - german and chinese - english show that our method achieves a good accuracy - latency trade - off over recently proposed state - of - the - art methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "end - to - end simultaneous speech - to - text translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["end", "-", "to", "-", "end", "simultaneous", "speech", "-", "to", "-", "text", "translation"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7, 
8, 9, 10, 11]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [15]}}, {"event_type": "RWS", "arguments": [{"text": "conventional methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["conventional", "methods"], "offsets": [73, 74]}, {"text": "fixed policies", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["fixed", "policies"], "offsets": [77, 78]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [76]}}, {"event_type": "RWF", "arguments": [{"text": "ignores", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignores"], "offsets": [100]}], "trigger": {"text": "ignores", "tokens": ["ignores"], "offsets": [100]}}, {"event_type": "RWF", "arguments": [{"text": "low translation quality", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["low", "translation", "quality"], "offsets": [106, 107, 108]}], "trigger": {"text": "suffers", "tokens": ["suffers"], "offsets": [104]}}, {"event_type": "PRP", "arguments": [{"text": "adaptive segmentation policy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["adaptive", "segmentation", "policy"], "offsets": [114, 115, 116]}, {"text": "end - to - end st", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["end", "-", "to", "-", "end", "simultaneous", "translation"], "offsets": [118, 119, 120, 121, 122, 34, 35]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [112]}}, {"event_type": "MDS", "arguments": [{"text": "source streaming speech", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["source", "streaming", "speech"], "offsets": [136, 137, 138]}, {"text": "meaningful units", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["meaningful", "units"], "offsets": [140, 141]}, {"text": "by considering both acoustic features and translation history", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "considering", "both", "acoustic", "features", "and", 
"translation", "history"], "offsets": [142, 143, 144, 145, 146, 147, 148, 149]}, {"text": "maintaining consistency between the segmentation and translation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["maintaining", "consistency", "between", "the", "segmentation", "and", "translation"], "offsets": [151, 152, 153, 154, 155, 156, 157]}], "trigger": {"text": "segment", "tokens": ["segment"], "offsets": [134]}}, {"event_type": "CMP", "arguments": [{"text": "on english - german and chinese - english", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "english", "-", "german", "and", "chinese", "-", "english"], "offsets": [161, 162, 163, 164, 165, 166, 167, 168]}, {"text": "good accuracy - latency trade - off", "nugget_type": "STR", "argument_type": "Result", "tokens": ["good", "accuracy", "-", "latency", "trade", "-", "off"], "offsets": [175, 176, 177, 178, 179, 180, 181]}, {"text": "recently proposed state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["recently", "proposed", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [183, 184, 185, 186, 187, 188, 189, 190, 191, 192]}, {"text": "adaptive segmentation policy", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["adaptive", "segmentation", "policy"], "offsets": [114, 115, 116]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [173]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [173]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [169]}}], "document": ["end", "-", "to", "-", "end", "simultaneous", "speech", "-", "to", "-", "text", "translation", "aims", "to", "directly", "perform", "translation", "from", "streaming", "source", "speech", "to", "target", "text", "with", "high", "translation", "quality", "and", "low", "latency", ".", "a", "typical", "simultaneous", "translation", "(", 
"st", ")", "system", "consists", "of", "a", "speech", "translation", "model", "and", "a", "policy", "module", ",", "which", "determines", "when", "to", "wait", "and", "when", "to", "translate", ".", "thus", "the", "policy", "is", "crucial", "to", "balance", "translation", "quality", "and", "latency", ".", "conventional", "methods", "usually", "adopt", "fixed", "policies", ",", "e", ".", "g", ".", "segmenting", "the", "source", "speech", "with", "a", "fixed", "length", "and", "generating", "translation", ".", "however", ",", "this", "method", "ignores", "contextual", "information", "and", "suffers", "from", "low", "translation", "quality", ".", "this", "paper", "proposes", "an", "adaptive", "segmentation", "policy", "for", "end", "-", "to", "-", "end", "st", ".", "inspired", "by", "human", "interpreters", ",", "the", "policy", "learns", "to", "segment", "the", "source", "streaming", "speech", "into", "meaningful", "units", "by", "considering", "both", "acoustic", "features", "and", "translation", "history", ",", "maintaining", "consistency", "between", "the", "segmentation", "and", "translation", ".", "experimental", "results", "on", "english", "-", "german", "and", "chinese", "-", "english", "show", "that", "our", "method", "achieves", "a", "good", "accuracy", "-", "latency", "trade", "-", "off", "over", "recently", "proposed", "state", "-", "of", "-", "the", "-", "art", "methods", "."]}, {"venue": "ACL", "title": "Intrinsic Bias Metrics Do Not Correlate with Application Bias", "abstract": "Natural Language Processing (NLP) systems learn harmful societal biases that cause them to amplify inequality as they are deployed in more and more situations. To guide efforts at debiasing these systems, the NLP community relies on a variety of metrics that quantify bias in models. Some of these metrics are intrinsic, measuring bias in word embedding spaces, and some are extrinsic, measuring bias in downstream tasks that the word embeddings enable. 
Do these intrinsic and extrinsic metrics correlate with each other? We compare intrinsic and extrinsic metrics across hundreds of trained models covering different tasks and experimental conditions. Our results show no reliable correlation between these metrics that holds in all scenarios across tasks and languages. We urge researchers working on debiasing to focus on extrinsic measures of bias, and to make using these measures more feasible via creation of new challenge sets and annotated test data. To aid this effort, we release code, a new intrinsic metric, and an annotated test set focused on gender bias in hate speech.", "doc_id": "c8a4732053cac389fe24cfb8b292e140", "publication_year": 2021, "sentences": ["natural language processing ( nlp ) systems learn harmful societal biases that cause them to amplify inequality as they are deployed in more and more situations .", "to guide efforts at debiasing these systems , the nlp community relies on a variety of metrics that quantify bias in models .", "some of these metrics are intrinsic , measuring bias in word embedding spaces , and some are extrinsic , measuring bias in downstream tasks that the word embeddings enable .", "do these intrinsic and extrinsic metrics correlate with each other ?", "we compare intrinsic and extrinsic metrics across hundreds of trained models covering different tasks and experimental conditions .", "our results show no reliable correlation between these metrics that holds in all scenarios across tasks and languages .", "we urge researchers working on debiasing to focus on extrinsic measures of bias , and to make using these measures more feasible via creation of new challenge sets and annotated test data .", "to aid this effort , we release code , a new intrinsic metric , and an annotated test set focused on gender bias in hate speech ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing ( nlp ) systems", "nugget_type": "APP", "argument_type": 
"Target", "tokens": ["natural", "language", "processing", "systems"], "offsets": [0, 1, 2, 6]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [7]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "across hundreds of trained models covering different tasks and experimental conditions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "hundreds", "of", "trained", "models", "covering", "different", "tasks", "and", "experimental", "conditions"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105, 106, 107]}, {"text": "intrinsic metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["intrinsic", "metrics"], "offsets": [93, 96]}, {"text": "extrinsic metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["extrinsic", "metrics"], "offsets": [95, 96]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [92]}}, {"event_type": "FAC", "arguments": [{"text": "no reliable correlation", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["no", "reliable", "correlation"], "offsets": [112, 113, 114]}, {"text": "between these metrics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "these", "metrics"], "offsets": [115, 116, 117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [111]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [166]}, {"text": "code", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["code"], "offsets": [168]}, {"text": "intrinsic metric", "nugget_type": "APP", "argument_type": "Content", "tokens": ["intrinsic", "metric"], "offsets": [172, 173]}, {"text": "annotated test set", "nugget_type": "DST", "argument_type": "Content", "tokens": ["annotated", "test", "set"], "offsets": [177, 178, 179]}], "trigger": {"text": "release", "tokens": 
["release"], "offsets": [167]}}], "document": ["natural", "language", "processing", "(", "nlp", ")", "systems", "learn", "harmful", "societal", "biases", "that", "cause", "them", "to", "amplify", "inequality", "as", "they", "are", "deployed", "in", "more", "and", "more", "situations", ".", "to", "guide", "efforts", "at", "debiasing", "these", "systems", ",", "the", "nlp", "community", "relies", "on", "a", "variety", "of", "metrics", "that", "quantify", "bias", "in", "models", ".", "some", "of", "these", "metrics", "are", "intrinsic", ",", "measuring", "bias", "in", "word", "embedding", "spaces", ",", "and", "some", "are", "extrinsic", ",", "measuring", "bias", "in", "downstream", "tasks", "that", "the", "word", "embeddings", "enable", ".", "do", "these", "intrinsic", "and", "extrinsic", "metrics", "correlate", "with", "each", "other", "?", "we", "compare", "intrinsic", "and", "extrinsic", "metrics", "across", "hundreds", "of", "trained", "models", "covering", "different", "tasks", "and", "experimental", "conditions", ".", "our", "results", "show", "no", "reliable", "correlation", "between", "these", "metrics", "that", "holds", "in", "all", "scenarios", "across", "tasks", "and", "languages", ".", "we", "urge", "researchers", "working", "on", "debiasing", "to", "focus", "on", "extrinsic", "measures", "of", "bias", ",", "and", "to", "make", "using", "these", "measures", "more", "feasible", "via", "creation", "of", "new", "challenge", "sets", "and", "annotated", "test", "data", ".", "to", "aid", "this", "effort", ",", "we", "release", "code", ",", "a", "new", "intrinsic", "metric", ",", "and", "an", "annotated", "test", "set", "focused", "on", "gender", "bias", "in", "hate", "speech", "."]}, {"venue": "ACL", "title": "Injecting Numerical Reasoning Skills into Language Models", "abstract": "Large pre-trained language models (LMs) are known to encode substantial amounts of linguistic information. 
However, high-level reasoning skills, such as numerical reasoning, are difficult to learn from a language-modeling objective only. Consequently, existing models for numerical reasoning have used specialized architectures with limited flexibility. In this work, we show that numerical reasoning is amenable to automatic data generation, and thus one can inject this skill into pre-trained LMs, by generating large amounts of data, and training in a multi-task setup. We show that pre-training our model, GenBERT, on this data, dramatically improves performance on DROP (49.3 \u2013> 72.3 F1), reaching performance that matches state-of-the-art models of comparable size, while using a simple and general-purpose encoder-decoder architecture. Moreover, GenBERT generalizes well to math word problem datasets, while maintaining high performance on standard RC tasks. Our approach provides a general recipe for injecting skills into large pre-trained LMs, whenever the skill is amenable to automatic data augmentation.", "doc_id": "2b1b69d0738696716d88d3328d8bd6e1", "publication_year": 2020, "sentences": ["large pre - trained language models ( lms ) are known to encode substantial amounts of linguistic information .", "however , high - level reasoning skills , such as numerical reasoning , are difficult to learn from a language - modeling objective only .", "consequently , existing models for numerical reasoning have used specialized architectures with limited flexibility .", "in this work , we show that numerical reasoning is amenable to automatic data generation , and thus one can inject this skill into pre - trained lms , by generating large amounts of data , and training in a multi - task setup .", "we show that pre - training our model , genbert , on this data , dramatically improves performance on drop ( 49 . 3 \u2013 > 72 . 
3 f1 ) , reaching performance that matches state - of - the - art models of comparable size , while using a simple and general - purpose encoder - decoder architecture .", "moreover , genbert generalizes well to math word problem datasets , while maintaining high performance on standard rc tasks .", "our approach provides a general recipe for injecting skills into large pre - trained lms , whenever the skill is amenable to automatic data augmentation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "large pre - trained language models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["large", "pre", "-", "trained", "language", "models"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "known", "tokens": ["known"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "high - level reasoning skills", "nugget_type": "MOD", "argument_type": "Fault", "tokens": ["high", "-", "level", "reasoning", "skills"], "offsets": [21, 22, 23, 24, 25]}, {"text": "language - modeling objective", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["language", "-", "modeling", "objective"], "offsets": [38, 39, 40, 41]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [35]}}, {"event_type": "RWS", "arguments": [{"text": "specialized architectures with limited flexibility", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["specialized", "architectures", "with", "limited", "flexibility"], "offsets": [53, 54, 55, 56, 57]}, {"text": "existing models for numerical reasoning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "models", "for", "numerical", "reasoning"], "offsets": [46, 47, 48, 49, 50]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [52]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [63]}, {"text": "amenable", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": 
["amenable"], "offsets": [69]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [64]}}, {"event_type": "FAC", "arguments": [{"text": "numerical reasoning", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["numerical", "reasoning"], "offsets": [66, 67]}, {"text": "automatic data generation", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["automatic", "data", "generation"], "offsets": [71, 72, 73]}], "trigger": {"text": "amenable", "tokens": ["amenable"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "large amounts of data", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["large", "amounts", "of", "data"], "offsets": [90, 91, 92, 93]}, {"text": "inject", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["inject"], "offsets": [79]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "multi - task setup", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["multi", "-", "task", "setup"], "offsets": [99, 100, 101, 102]}, {"text": "inject", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["inject"], "offsets": [79]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [96]}}, {"event_type": "PUR", "arguments": [{"text": "pre - trained language models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pre", "-", "trained", "language", "models"], "offsets": [1, 2, 3, 4, 5]}], "trigger": {"text": "inject", "tokens": ["inject"], "offsets": [79]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [104]}, {"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [120]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [105]}}, {"event_type": "CMP", "arguments": [{"text": "improves", "nugget_type": "STR", "argument_type": "Result", 
"tokens": ["improves"], "offsets": [120]}, {"text": "72 . 3", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["72", ".", "3"], "offsets": [130, 131, 132]}, {"text": "f1", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1"], "offsets": [133]}, {"text": "state - of - the - art models of comparable size", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models", "of", "comparable", "size"], "offsets": [140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150]}, {"text": "genbert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["genbert"], "offsets": [113]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [120]}}, {"event_type": "RWS", "arguments": [{"text": "state - of - the - art models of comparable size", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "models", "of", "comparable", "size"], "offsets": [140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150]}, {"text": "simple and general - purpose encoder - decoder architecture", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["simple", "and", "general", "-", "purpose", "encoder", "-", "decoder", "architecture"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [153]}}, {"event_type": "FAC", "arguments": [{"text": "genbert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["genbert"], "offsets": [167]}, {"text": "math word problem datasets", "nugget_type": "DST", "argument_type": "Object", "tokens": ["math", "word", "problem", "datasets"], "offsets": [171, 172, 173, 174]}, {"text": "well", "nugget_type": "STR", "argument_type": "Target", "tokens": ["well"], "offsets": [169]}], "trigger": {"text": "generalizes", "tokens": ["generalizes"], "offsets": [168]}}, {"event_type": "FAC", "arguments": [{"text": "genbert", "nugget_type": "APP", "argument_type": 
"Subject", "tokens": ["genbert"], "offsets": [167]}, {"text": "on standard rc tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "standard", "rc", "tasks"], "offsets": [180, 181, 182, 183]}, {"text": "high performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["high", "performance"], "offsets": [178, 179]}], "trigger": {"text": "maintaining", "tokens": ["maintaining"], "offsets": [177]}}, {"event_type": "MDS", "arguments": [{"text": "general recipe for injecting skills into large pre - trained lms", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["general", "recipe", "for", "injecting", "skills", "into", "large", "pre", "-", "trained", "lms"], "offsets": [189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [187]}}], "document": ["large", "pre", "-", "trained", "language", "models", "(", "lms", ")", "are", "known", "to", "encode", "substantial", "amounts", "of", "linguistic", "information", ".", "however", ",", "high", "-", "level", "reasoning", "skills", ",", "such", "as", "numerical", "reasoning", ",", "are", "difficult", "to", "learn", "from", "a", "language", "-", "modeling", "objective", "only", ".", "consequently", ",", "existing", "models", "for", "numerical", "reasoning", "have", "used", "specialized", "architectures", "with", "limited", "flexibility", ".", "in", "this", "work", ",", "we", "show", "that", "numerical", "reasoning", "is", "amenable", "to", "automatic", "data", "generation", ",", "and", "thus", "one", "can", "inject", "this", "skill", "into", "pre", "-", "trained", "lms", ",", "by", "generating", "large", "amounts", "of", "data", ",", "and", "training", "in", "a", "multi", "-", "task", "setup", ".", "we", "show", "that", "pre", "-", "training", "our", "model", ",", "genbert", ",", "on", "this", "data", ",", "dramatically", "improves", "performance", "on", "drop", "(", "49", ".", "3", "\u2013", ">", "72", ".", 
"3", "f1", ")", ",", "reaching", "performance", "that", "matches", "state", "-", "of", "-", "the", "-", "art", "models", "of", "comparable", "size", ",", "while", "using", "a", "simple", "and", "general", "-", "purpose", "encoder", "-", "decoder", "architecture", ".", "moreover", ",", "genbert", "generalizes", "well", "to", "math", "word", "problem", "datasets", ",", "while", "maintaining", "high", "performance", "on", "standard", "rc", "tasks", ".", "our", "approach", "provides", "a", "general", "recipe", "for", "injecting", "skills", "into", "large", "pre", "-", "trained", "lms", ",", "whenever", "the", "skill", "is", "amenable", "to", "automatic", "data", "augmentation", "."]}, {"venue": "ACL", "title": "A Novel Cascade Binary Tagging Framework for Relational Triple Extraction", "abstract": "Extracting relational triples from unstructured text is crucial for large-scale knowledge graph construction. However, few existing works excel in solving the overlapping triple problem where multiple relational triples in the same sentence share the same entities. In this work, we introduce a fresh perspective to revisit the relational triple extraction task and propose a novel cascade binary tagging framework (CasRel) derived from a principled problem formulation. Instead of treating relations as discrete labels as in previous works, our new framework models relations as functions that map subjects to objects in a sentence, which naturally handles the overlapping problem. Experiments show that the CasRel framework already outperforms state-of-the-art methods even when its encoder module uses a randomly initialized BERT encoder, showing the power of the new tagging framework. It enjoys further performance boost when employing a pre-trained BERT encoder, outperforming the strongest baseline by 17.5 and 30.2 absolute gain in F1-score on two public datasets NYT and WebNLG, respectively. 
In-depth analysis on different scenarios of overlapping triples shows that the method delivers consistent performance gain across all these scenarios. The source code and data are released online.", "doc_id": "6c81634996dd3fef4e930db5f6b464b4", "publication_year": 2020, "sentences": ["extracting relational triples from unstructured text is crucial for large - scale knowledge graph construction .", "however , few existing works excel in solving the overlapping triple problem where multiple relational triples in the same sentence share the same entities .", "in this work , we introduce a fresh perspective to revisit the relational triple extraction task and propose a novel cascade binary tagging framework ( casrel ) derived from a principled problem formulation .", "instead of treating relations as discrete labels as in previous works , our new framework models relations as functions that map subjects to objects in a sentence , which naturally handles the overlapping problem .", "experiments show that the casrel framework already outperforms state - of - the - art methods even when its encoder module uses a randomly initialized bert encoder , showing the power of the new tagging framework .", "it enjoys further performance boost when employing a pre - trained bert encoder , outperforming the strongest baseline by 17 . 5 and 30 . 
2 absolute gain in f1 - score on two public datasets nyt and webnlg , respectively .", "in - depth analysis on different scenarios of overlapping triples shows that the method delivers consistent performance gain across all these scenarios .", "the source code and data are released online ."], "events": [{"event_type": "ITT", "arguments": [{"text": "extracting relational triples from unstructured text", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["extracting", "relational", "triples", "from", "unstructured", "text"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [7]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, {"text": "fresh perspective", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fresh", "perspective"], "offsets": [48, 49]}, {"text": "revisit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["revisit"], "offsets": [51]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "relational triple extraction task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relational", "triple", "extraction", "task"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "revisit", "tokens": ["revisit"], "offsets": [51]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, {"text": "cascade binary tagging framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cascade", "binary", "tagging", "framework"], "offsets": [61, 62, 63, 64]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], 
"offsets": [111]}}, {"event_type": "CMP", "arguments": [{"text": "cascade binary tagging framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cascade", "binary", "tagging", "framework"], "offsets": [61, 62, 63, 64]}, {"text": "state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125]}, {"text": "when its encoder module uses a randomly initialized bert encoder", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "its", "encoder", "module", "uses", "a", "randomly", "initialized", "bert", "encoder"], "offsets": [127, 128, 129, 130, 131, 132, 133, 134, 135, 136]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [117]}}, {"event_type": "CMP", "arguments": [{"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [161]}, {"text": "employing a pre - trained bert encoder", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["employing", "a", "pre", "-", "trained", "bert", "encoder"], "offsets": [153, 154, 155, 156, 157, 158, 159]}, {"text": "strongest baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strongest", "baseline"], "offsets": [163, 164]}, {"text": "17 . 
5", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["17", ".", "5"], "offsets": [166, 167, 168]}, {"text": "absolute gain in f1 - score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["absolute", "gain", "in", "f1", "-", "score"], "offsets": [173, 174, 175, 176, 177, 178]}, {"text": "public datasets nyt", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["public", "datasets", "nyt"], "offsets": [181, 182, 183]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [161]}}, {"event_type": "CMP", "arguments": [{"text": "employing a pre - trained bert encoder", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["employing", "a", "pre", "-", "trained", "bert", "encoder"], "offsets": [153, 154, 155, 156, 157, 158, 159]}, {"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [161]}, {"text": "strongest baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strongest", "baseline"], "offsets": [163, 164]}, {"text": "30 . 
2", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["30", ".", "2"], "offsets": [170, 171, 172]}, {"text": "absolute gain in f1 - score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["absolute", "gain", "in", "f1", "-", "score"], "offsets": [173, 174, 175, 176, 177, 178]}, {"text": "webnlg", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["webnlg"], "offsets": [185]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [161]}}, {"event_type": "FIN", "arguments": [{"text": "delivers", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["delivers"], "offsets": [203]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [199]}}, {"event_type": "FAC", "arguments": [{"text": "cascade binary tagging framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cascade", "binary", "tagging", "framework"], "offsets": [61, 62, 63, 64]}, {"text": "consistent performance gain", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["consistent", "performance", "gain"], "offsets": [204, 205, 206]}, {"text": "across all these scenarios", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "all", "these", "scenarios"], "offsets": [207, 208, 209, 210]}], "trigger": {"text": "delivers", "tokens": ["delivers"], "offsets": [203]}}], "document": ["extracting", "relational", "triples", "from", "unstructured", "text", "is", "crucial", "for", "large", "-", "scale", "knowledge", "graph", "construction", ".", "however", ",", "few", "existing", "works", "excel", "in", "solving", "the", "overlapping", "triple", "problem", "where", "multiple", "relational", "triples", "in", "the", "same", "sentence", "share", "the", "same", "entities", ".", "in", "this", "work", ",", "we", "introduce", "a", "fresh", "perspective", "to", "revisit", "the", "relational", "triple", "extraction", "task", "and", "propose", "a", "novel", "cascade", "binary", "tagging", "framework", "(", 
"casrel", ")", "derived", "from", "a", "principled", "problem", "formulation", ".", "instead", "of", "treating", "relations", "as", "discrete", "labels", "as", "in", "previous", "works", ",", "our", "new", "framework", "models", "relations", "as", "functions", "that", "map", "subjects", "to", "objects", "in", "a", "sentence", ",", "which", "naturally", "handles", "the", "overlapping", "problem", ".", "experiments", "show", "that", "the", "casrel", "framework", "already", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "even", "when", "its", "encoder", "module", "uses", "a", "randomly", "initialized", "bert", "encoder", ",", "showing", "the", "power", "of", "the", "new", "tagging", "framework", ".", "it", "enjoys", "further", "performance", "boost", "when", "employing", "a", "pre", "-", "trained", "bert", "encoder", ",", "outperforming", "the", "strongest", "baseline", "by", "17", ".", "5", "and", "30", ".", "2", "absolute", "gain", "in", "f1", "-", "score", "on", "two", "public", "datasets", "nyt", "and", "webnlg", ",", "respectively", ".", "in", "-", "depth", "analysis", "on", "different", "scenarios", "of", "overlapping", "triples", "shows", "that", "the", "method", "delivers", "consistent", "performance", "gain", "across", "all", "these", "scenarios", ".", "the", "source", "code", "and", "data", "are", "released", "online", "."]}, {"venue": "ACL", "title": "On the Importance of Effectively Adapting Pretrained Language Models for Active Learning", "abstract": "Recent active learning (AL) approaches in Natural Language Processing (NLP) proposed using off-the-shelf pretrained language models (LMs). In this paper, we argue that these LMs are not adapted effectively to the downstream task during AL and we explore ways to address this issue. We suggest to first adapt the pretrained LM to the target task by continuing training with all the available unlabeled data and then use it for AL. 
We also propose a simple yet effective fine-tuning method to ensure that the adapted LM is properly trained in both low and high resource scenarios during AL. Our experiments demonstrate that our approach provides substantial data efficiency improvements compared to the standard fine-tuning approach, suggesting that a poor training strategy can be catastrophic for AL.", "doc_id": "bcbd83a7eedf0d519d979c4daa80dbee", "publication_year": 2022, "sentences": ["recent active learning ( al ) approaches in natural language processing ( nlp ) proposed using off - the - shelf pretrained language models ( lms ) .", "in this paper , we argue that these lms are not adapted effectively to the downstream task during al and we explore ways to address this issue .", "we suggest to first adapt the pretrained lm to the target task by continuing training with all the available unlabeled data and then use it for al .", "we also propose a simple yet effective fine - tuning method to ensure that the adapted lm is properly trained in both low and high resource scenarios during al .", "our experiments demonstrate that our approach provides substantial data efficiency improvements compared to the standard fine - tuning approach , suggesting that a poor training strategy can be catastrophic for al ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing"], "offsets": [8, 9, 10]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [14]}}, {"event_type": "RWF", "arguments": [{"text": "not adapted effectively", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "adapted", "effectively"], "offsets": [38, 39, 40]}, {"text": "downstream task during al", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["downstream", "task", "during", "active", "learning"], "offsets": [43, 44, 45, 1, 2]}], "trigger": {"text": "not adapted 
effectively", "tokens": ["not", "adapted", "effectively"], "offsets": [38, 39, 40]}}, {"event_type": "MDS", "arguments": [{"text": "all the available unlabeled data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["all", "the", "available", "unlabeled", "data"], "offsets": [72, 73, 74, 75, 76]}, {"text": "adapt", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adapt"], "offsets": [60]}], "trigger": {"text": "continuing training", "tokens": ["continuing", "training"], "offsets": [69, 70]}}, {"event_type": "MDS", "arguments": [{"text": "pretrained lm", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["pretrained", "language", "models"], "offsets": [62, 22, 23]}, {"text": "al", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["active", "learning"], "offsets": [1, 2]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [79]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [84]}, {"text": "simple yet effective fine - tuning method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "yet", "effective", "fine", "-", "tuning", "method"], "offsets": [88, 89, 90, 91, 92, 93, 94]}, {"text": "trained", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["trained"], "offsets": [103]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "adapted lm", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["adapted", "language", "models"], "offsets": [99, 22, 23]}, {"text": "in both low and high resource scenarios during al", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "both", "low", "and", "high", "resource", "scenarios", "during", "active", "learning"], "offsets": [104, 105, 106, 107, 108, 109, 110, 111, 1, 2]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [103]}}, {"event_type": 
"CMP", "arguments": [{"text": "substantial", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantial"], "offsets": [121]}, {"text": "data efficiency improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["data", "efficiency", "improvements"], "offsets": [122, 123, 124]}, {"text": "standard fine - tuning approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "fine", "-", "tuning", "approach"], "offsets": [128, 129, 130, 131, 132]}, {"text": "simple yet effective fine - tuning method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["simple", "yet", "effective", "fine", "-", "tuning", "method"], "offsets": [88, 89, 90, 91, 92, 93, 94]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [120]}}, {"event_type": "FAC", "arguments": [{"text": "poor training strategy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["poor", "training", "strategy"], "offsets": [137, 138, 139]}, {"text": "al", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["active", "learning"], "offsets": [1, 2]}], "trigger": {"text": "catastrophic", "tokens": ["catastrophic"], "offsets": [142]}}, {"event_type": "PUR", "arguments": [{"text": "pretrained lm", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pretrained", "language", "models"], "offsets": [62, 22, 23]}, {"text": "to the target task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "target", "task"], "offsets": [64, 65, 66, 67]}], "trigger": {"text": "adapt", "tokens": ["adapt"], "offsets": [60]}}, {"event_type": "FIN", "arguments": [{"text": "provides", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["provides"], "offsets": [120]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [116]}}, {"event_type": "FIN", "arguments": [{"text": "catastrophic", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["catastrophic"], "offsets": [142]}], 
"trigger": {"text": "suggesting", "tokens": ["suggesting"], "offsets": [134]}}], "document": ["recent", "active", "learning", "(", "al", ")", "approaches", "in", "natural", "language", "processing", "(", "nlp", ")", "proposed", "using", "off", "-", "the", "-", "shelf", "pretrained", "language", "models", "(", "lms", ")", ".", "in", "this", "paper", ",", "we", "argue", "that", "these", "lms", "are", "not", "adapted", "effectively", "to", "the", "downstream", "task", "during", "al", "and", "we", "explore", "ways", "to", "address", "this", "issue", ".", "we", "suggest", "to", "first", "adapt", "the", "pretrained", "lm", "to", "the", "target", "task", "by", "continuing", "training", "with", "all", "the", "available", "unlabeled", "data", "and", "then", "use", "it", "for", "al", ".", "we", "also", "propose", "a", "simple", "yet", "effective", "fine", "-", "tuning", "method", "to", "ensure", "that", "the", "adapted", "lm", "is", "properly", "trained", "in", "both", "low", "and", "high", "resource", "scenarios", "during", "al", ".", "our", "experiments", "demonstrate", "that", "our", "approach", "provides", "substantial", "data", "efficiency", "improvements", "compared", "to", "the", "standard", "fine", "-", "tuning", "approach", ",", "suggesting", "that", "a", "poor", "training", "strategy", "can", "be", "catastrophic", "for", "al", "."]}, {"venue": "ACL", "title": "Doing Good or Doing Right? Exploring the Weakness of Commonsense Causal Reasoning Models", "abstract": "Pretrained language models (PLM) achieve surprising performance on the Choice of Plausible Alternatives (COPA) task. However, whether PLMs have truly acquired the ability of causal reasoning remains a question. In this paper, we investigate the problem of semantic similarity bias and reveal the vulnerability of current COPA models by certain attacks. 
Previous solutions that tackle the superficial cues of unbalanced token distribution still encounter the same problem of semantic bias, even more seriously due to the utilization of more training data. We mitigate this problem by simply adding a regularization loss and experimental results show that this solution not only improves the model\u2019s generalization ability, but also assists the models to perform more robustly on a challenging dataset, BCOPA-CE, which has unbiased token distribution and is more difficult for models to distinguish cause and effect.", "doc_id": "d746ee7ccdcee8f75a20579ecb17f57a", "publication_year": 2021, "sentences": ["pretrained language models ( plm ) achieve surprising performance on the choice of plausible alternatives ( copa ) task .", "however , whether plms have truly acquired the ability of causal reasoning remains a question .", "in this paper , we investigate the problem of semantic similarity bias and reveal the vulnerability of current copa models by certain attacks .", "previous solutions that tackle the superficial cues of unbalanced token distribution still encounter the same problem of semantic bias , even more seriously due to the utilization of more training data .", "we mitigate this problem by simply adding a regularization loss and experimental results show that this solution not only improves the model \u2019 s generalization ability , but also assists the models to perform more robustly on a challenging dataset , bcopa - ce , which has unbiased token distribution and is more difficult for models to distinguish cause and effect ."], "events": [{"event_type": "ITT", "arguments": [{"text": "choice of plausible alternatives", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["choice", "of", "plausible", "alternatives"], "offsets": [11, 12, 13, 14]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "question", "nugget_type": "WEA", 
"argument_type": "Fault", "tokens": ["question"], "offsets": [34]}, {"text": "plms", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["plms"], "offsets": [23]}, {"text": "ability of causal reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["ability", "of", "causal", "reasoning"], "offsets": [28, 29, 30, 31]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [32]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "semantic similarity bias", "nugget_type": "WEA", "argument_type": "Content", "tokens": ["semantic", "similarity", "bias"], "offsets": [45, 46, 47]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "vulnerability of current copa models", "nugget_type": "WEA", "argument_type": "Content", "tokens": ["vulnerability", "of", "current", "copa", "models"], "offsets": [51, 52, 53, 54, 55]}, {"text": "by certain attacks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "certain", "attacks"], "offsets": [56, 57, 58]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [49]}}, {"event_type": "PUR", "arguments": [{"text": "semantic bias", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["semantic", "bias"], "offsets": [77, 78]}], "trigger": {"text": "mitigate", "tokens": ["mitigate"], "offsets": [93]}}, {"event_type": "FIN", "arguments": [{"text": "improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improves"], "offsets": [111]}, {"text": "assists", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["assists"], "offsets": [121]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [105]}}, {"event_type": "FAC", "arguments": [{"text": "model \u2019 s 
generalization ability", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["pretrained", "language", "models", "\u2019", "s", "generalization", "ability"], "offsets": [0, 1, 2, 114, 115, 116, 117]}, {"text": "this solution", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simply", "adding", "a", "regularization", "loss"], "offsets": [97, 98, 99, 100, 101]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [111]}}, {"event_type": "FAC", "arguments": [{"text": "pretrained language models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["pretrained", "language", "models"], "offsets": [0, 1, 2]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [125]}, {"text": "this solution", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simply", "adding", "a", "regularization", "loss"], "offsets": [97, 98, 99, 100, 101]}, {"text": "models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["pretrained", "language", "models"], "offsets": [0, 1, 2]}], "trigger": {"text": "assists", "tokens": ["assists"], "offsets": [121]}}, {"event_type": "PUR", "arguments": [{"text": "more robustly", "nugget_type": "STR", "argument_type": "Aim", "tokens": ["more", "robustly"], "offsets": [126, 127]}, {"text": "bcopa - ce", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["bcopa", "-", "ce"], "offsets": [133, 134, 135]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [125]}}, {"event_type": "RWF", "arguments": [{"text": "semantic bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["semantic", "bias"], "offsets": [77, 78]}, {"text": "previous solutions", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "solutions"], "offsets": [60, 61]}, {"text": "tackle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tackle"], "offsets": [63]}], "trigger": {"text": "encounter", "tokens": 
["encounter"], "offsets": [72]}}, {"event_type": "PUR", "arguments": [{"text": "superficial cues of unbalanced token distribution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["superficial", "cues", "of", "unbalanced", "token", "distribution"], "offsets": [65, 66, 67, 68, 69, 70]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [63]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [92]}, {"text": "mitigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mitigate"], "offsets": [93]}, {"text": "regularization loss", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["regularization", "loss"], "offsets": [100, 101]}], "trigger": {"text": "simply adding", "tokens": ["simply", "adding"], "offsets": [97, 98]}}], "document": ["pretrained", "language", "models", "(", "plm", ")", "achieve", "surprising", "performance", "on", "the", "choice", "of", "plausible", "alternatives", "(", "copa", ")", "task", ".", "however", ",", "whether", "plms", "have", "truly", "acquired", "the", "ability", "of", "causal", "reasoning", "remains", "a", "question", ".", "in", "this", "paper", ",", "we", "investigate", "the", "problem", "of", "semantic", "similarity", "bias", "and", "reveal", "the", "vulnerability", "of", "current", "copa", "models", "by", "certain", "attacks", ".", "previous", "solutions", "that", "tackle", "the", "superficial", "cues", "of", "unbalanced", "token", "distribution", "still", "encounter", "the", "same", "problem", "of", "semantic", "bias", ",", "even", "more", "seriously", "due", "to", "the", "utilization", "of", "more", "training", "data", ".", "we", "mitigate", "this", "problem", "by", "simply", "adding", "a", "regularization", "loss", "and", "experimental", "results", "show", "that", "this", "solution", "not", "only", "improves", "the", "model", "\u2019", "s", "generalization", "ability", ",", "but", "also", "assists", 
"the", "models", "to", "perform", "more", "robustly", "on", "a", "challenging", "dataset", ",", "bcopa", "-", "ce", ",", "which", "has", "unbiased", "token", "distribution", "and", "is", "more", "difficult", "for", "models", "to", "distinguish", "cause", "and", "effect", "."]}, {"venue": "ACL", "title": "H-Transformer-1D: Fast One-Dimensional Hierarchical Attention for Sequences", "abstract": "We describe an efficient hierarchical method to compute attention in the Transformer architecture. The proposed attention mechanism exploits a matrix structure similar to the Hierarchical Matrix (H-Matrix) developed by the numerical analysis community, and has linear run time and memory complexity. We perform extensive experiments to show that the inductive bias embodied by our hierarchical attention is effective in capturing the hierarchical structure in the sequences typical for natural language and vision tasks. Our method is superior to alternative sub-quadratic proposals by over +6 points on average on the Long Range Arena benchmark. 
It also sets a new SOTA test perplexity on One-Billion Word dataset with 5x fewer model parameters than that of the previous-best Transformer-based models.", "doc_id": "e95e20527bf63555301433cd040c4d27", "publication_year": 2021, "sentences": ["we describe an efficient hierarchical method to compute attention in the transformer architecture .", "the proposed attention mechanism exploits a matrix structure similar to the hierarchical matrix ( h - matrix ) developed by the numerical analysis community , and has linear run time and memory complexity .", "we perform extensive experiments to show that the inductive bias embodied by our hierarchical attention is effective in capturing the hierarchical structure in the sequences typical for natural language and vision tasks .", "our method is superior to alternative sub - quadratic proposals by over + 6 points on average on the long range arena benchmark .", "it also sets a new sota test perplexity on one - billion word dataset with 5x fewer model parameters than that of the previous - best transformer - based models ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "efficient hierarchical method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["efficient", "hierarchical", "method"], "offsets": [3, 4, 5]}, {"text": "compute", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["compute"], "offsets": [7]}, {"text": "in the transformer architecture", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "transformer", "architecture"], "offsets": [9, 10, 11, 12]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "attention", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["attention"], "offsets": [8]}, {"text": "in the transformer architecture", "nugget_type": "LIM", "argument_type": 
"Condition", "tokens": ["in", "the", "transformer", "architecture"], "offsets": [9, 10, 11, 12]}], "trigger": {"text": "compute", "tokens": ["compute"], "offsets": [7]}}, {"event_type": "MDS", "arguments": [{"text": "matrix structure", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["matrix", "structure"], "offsets": [20, 21]}, {"text": "similar to the hierarchical matrix ( h - matrix ) developed by the numerical analysis community", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["similar", "to", "the", "hierarchical", "matrix", "developed", "by", "the", "numerical", "analysis", "community"], "offsets": [22, 23, 24, 25, 26, 32, 33, 34, 35, 36, 37]}], "trigger": {"text": "exploits", "tokens": ["exploits"], "offsets": [18]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [48]}, {"text": "extensive experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "experiments"], "offsets": [50, 51]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [49]}}, {"event_type": "FAC", "arguments": [{"text": "inductive bias", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["inductive", "bias"], "offsets": [56, 57]}, {"text": "embodied by our hierarchical attention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["embodied", "by", "our", "hierarchical", "attention"], "offsets": [58, 59, 60, 61, 62]}, {"text": "natural language", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language"], "offsets": [75, 76]}, {"text": "capturing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capturing"], "offsets": [66]}, {"text": "vision tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["vision", "tasks"], "offsets": [78, 79]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": 
"hierarchical structure", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["hierarchical", "structure"], "offsets": [68, 69]}, {"text": "in the sequences typical", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "sequences", "typical"], "offsets": [70, 71, 72, 73]}], "trigger": {"text": "capturing", "tokens": ["capturing"], "offsets": [66]}}, {"event_type": "CMP", "arguments": [{"text": "superior", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superior"], "offsets": [84]}, {"text": "alternative sub - quadratic proposals", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["alternative", "sub", "-", "quadratic", "proposals"], "offsets": [86, 87, 88, 89, 90]}, {"text": "over + 6 points", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["over", "+", "6", "points"], "offsets": [92, 93, 94, 95]}, {"text": "on the long range arena benchmark", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "long", "range", "arena", "benchmark"], "offsets": [98, 99, 100, 101, 102, 103]}], "trigger": {"text": "superior", "tokens": ["superior"], "offsets": [84]}}, {"event_type": "CMP", "arguments": [{"text": "5x", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["5x"], "offsets": [120]}, {"text": "model parameters", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["model", "parameters"], "offsets": [122, 123]}, {"text": "previous - best transformer - based models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "-", "best", "transformer", "-", "based", "models"], "offsets": [128, 129, 130, 131, 132, 133, 134]}, {"text": "efficient hierarchical method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["efficient", "hierarchical", "method"], "offsets": [3, 4, 5]}, {"text": "new sota test perplexity", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["new", "sota", "test", "perplexity"], "offsets": [109, 110, 111, 112]}, {"text": 
"one - billion word dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["one", "-", "billion", "word", "dataset"], "offsets": [114, 115, 116, 117, 118]}, {"text": "fewer", "nugget_type": "STR", "argument_type": "Result", "tokens": ["fewer"], "offsets": [121]}], "trigger": {"text": "sets", "tokens": ["sets"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "linear run time", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["linear", "run", "time"], "offsets": [41, 42, 43]}, {"text": "memory complexity", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["memory", "complexity"], "offsets": [45, 46]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [40]}}], "document": ["we", "describe", "an", "efficient", "hierarchical", "method", "to", "compute", "attention", "in", "the", "transformer", "architecture", ".", "the", "proposed", "attention", "mechanism", "exploits", "a", "matrix", "structure", "similar", "to", "the", "hierarchical", "matrix", "(", "h", "-", "matrix", ")", "developed", "by", "the", "numerical", "analysis", "community", ",", "and", "has", "linear", "run", "time", "and", "memory", "complexity", ".", "we", "perform", "extensive", "experiments", "to", "show", "that", "the", "inductive", "bias", "embodied", "by", "our", "hierarchical", "attention", "is", "effective", "in", "capturing", "the", "hierarchical", "structure", "in", "the", "sequences", "typical", "for", "natural", "language", "and", "vision", "tasks", ".", "our", "method", "is", "superior", "to", "alternative", "sub", "-", "quadratic", "proposals", "by", "over", "+", "6", "points", "on", "average", "on", "the", "long", "range", "arena", "benchmark", ".", "it", "also", "sets", "a", "new", "sota", "test", "perplexity", "on", "one", "-", "billion", "word", "dataset", "with", "5x", "fewer", "model", "parameters", "than", "that", "of", "the", "previous", "-", "best", "transformer", "-", "based", "models", "."]}, {"venue": "ACL", 
"title": "e-CARE: a New Dataset for Exploring Explainable Causal Reasoning", "abstract": "Understanding causality has vital importance for various Natural Language Processing (NLP) applications. Beyond the labeled instances, conceptual explanations of the causality can provide deep understanding of the causal fact to facilitate the causal reasoning process. However, such explanation information still remains absent in existing causal reasoning resources. In this paper, we fill this gap by presenting a human-annotated explainable CAusal REasoning dataset (e-CARE), which contains over 20K causal reasoning questions, together with natural language formed explanations of the causal questions. Experimental results show that generating valid explanations for causal facts still remains especially challenging for the state-of-the-art models, and the explanation information can be helpful for promoting the accuracy and stability of causal reasoning models.", "doc_id": "5f0baed23ed360e5d4d8ffbe5b91a790", "publication_year": 2022, "sentences": ["understanding causality has vital importance for various natural language processing ( nlp ) applications .", "beyond the labeled instances , conceptual explanations of the causality can provide deep understanding of the causal fact to facilitate the causal reasoning process .", "however , such explanation information still remains absent in existing causal reasoning resources .", "in this paper , we fill this gap by presenting a human - annotated explainable causal reasoning dataset ( e - care ) , which contains over 20k causal reasoning questions , together with natural language formed explanations of the causal questions .", "experimental results show that generating valid explanations for causal facts still remains especially challenging for the state - of - the - art models , and the explanation information can be helpful for promoting the accuracy and stability of causal reasoning models ."], "events": [{"event_type": "ITT", 
"arguments": [{"text": "natural language processing ( nlp ) applications", "nugget_type": "APP", "argument_type": "Target", "tokens": ["natural", "language", "processing", "applications"], "offsets": [7, 8, 9, 13]}], "trigger": {"text": "importance", "tokens": ["importance"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "explanation information", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["explanation", "information"], "offsets": [43, 44]}, {"text": "absent", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["absent"], "offsets": [47]}, {"text": "in existing causal reasoning resources", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "existing", "causal", "reasoning", "resources"], "offsets": [48, 49, 50, 51, 52]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [46]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "human - annotated explainable causal reasoning dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["human", "-", "annotated", "explainable", "causal", "reasoning", "dataset"], "offsets": [65, 66, 67, 68, 69, 70, 71]}, {"text": "fill", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fill"], "offsets": [59]}], "trigger": {"text": "presenting", "tokens": ["presenting"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "explanation information still remains absent", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["explanation", "information", "still", "remains", "absent"], "offsets": [43, 44, 45, 46, 47]}], "trigger": {"text": "fill", "tokens": ["fill"], "offsets": [59]}}, {"event_type": "FIN", "arguments": [{"text": "helpful", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["helpful"], "offsets": [128]}, {"text": "remains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["remains"], 
"offsets": [108]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [99]}}, {"event_type": "FAC", "arguments": [{"text": "explanation information", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["explanation", "information"], "offsets": [124, 125]}, {"text": "promoting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["promoting"], "offsets": [130]}], "trigger": {"text": "helpful", "tokens": ["helpful"], "offsets": [128]}}, {"event_type": "PUR", "arguments": [{"text": "accuracy of causal reasoning models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["accuracy", "of", "causal", "reasoning", "models"], "offsets": [132, 135, 136, 137, 138]}, {"text": "stability of causal reasoning models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["stability", "of", "causal", "reasoning", "models"], "offsets": [134, 135, 136, 137, 138]}], "trigger": {"text": "promoting", "tokens": ["promoting"], "offsets": [130]}}, {"event_type": "FAC", "arguments": [{"text": "especially challenging", "nugget_type": "WEA", "argument_type": "Object", "tokens": ["especially", "challenging"], "offsets": [109, 110]}, {"text": "for the state - of - the - art models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "the", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118, 119, 120]}, {"text": "generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [101]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [108]}}, {"event_type": "PUR", "arguments": [{"text": "valid explanations", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["valid", "explanations"], "offsets": [102, 103]}, {"text": "for causal facts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "causal", "facts"], "offsets": [104, 105, 106]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": 
[101]}}], "document": ["understanding", "causality", "has", "vital", "importance", "for", "various", "natural", "language", "processing", "(", "nlp", ")", "applications", ".", "beyond", "the", "labeled", "instances", ",", "conceptual", "explanations", "of", "the", "causality", "can", "provide", "deep", "understanding", "of", "the", "causal", "fact", "to", "facilitate", "the", "causal", "reasoning", "process", ".", "however", ",", "such", "explanation", "information", "still", "remains", "absent", "in", "existing", "causal", "reasoning", "resources", ".", "in", "this", "paper", ",", "we", "fill", "this", "gap", "by", "presenting", "a", "human", "-", "annotated", "explainable", "causal", "reasoning", "dataset", "(", "e", "-", "care", ")", ",", "which", "contains", "over", "20k", "causal", "reasoning", "questions", ",", "together", "with", "natural", "language", "formed", "explanations", "of", "the", "causal", "questions", ".", "experimental", "results", "show", "that", "generating", "valid", "explanations", "for", "causal", "facts", "still", "remains", "especially", "challenging", "for", "the", "state", "-", "of", "-", "the", "-", "art", "models", ",", "and", "the", "explanation", "information", "can", "be", "helpful", "for", "promoting", "the", "accuracy", "and", "stability", "of", "causal", "reasoning", "models", "."]}, {"venue": "ACL", "title": "Verbal Multiword Expressions for Identification of Metaphor", "abstract": "Metaphor is a linguistic device in which a concept is expressed by mentioning another. Identifying metaphorical expressions, therefore, requires a non-compositional understanding of semantics. Multiword Expressions (MWEs), on the other hand, are linguistic phenomena with varying degrees of semantic opacity and their identification poses a challenge to computational models. 
This work is the first attempt at analysing the interplay of metaphor and MWEs processing through the design of a neural architecture whereby classification of metaphors is enhanced by informing the model of the presence of MWEs. To the best of our knowledge, this is the first \u201cMWE-aware\u201d metaphor identification system paving the way for further experiments on the complex interactions of these phenomena. The results and analyses show that this proposed architecture reach state-of-the-art on two different established metaphor datasets.", "doc_id": "25077ffcad59ea66f65c294a8534df43", "publication_year": 2020, "sentences": ["metaphor is a linguistic device in which a concept is expressed by mentioning another .", "identifying metaphorical expressions , therefore , requires a non - compositional understanding of semantics .", "multiword expressions ( mwes ) , on the other hand , are linguistic phenomena with varying degrees of semantic opacity and their identification poses a challenge to computational models .", "this work is the first attempt at analysing the interplay of metaphor and mwes processing through the design of a neural architecture whereby classification of metaphors is enhanced by informing the model of the presence of mwes .", "to the best of our knowledge , this is the first \u201c mwe - aware \u201d metaphor identification system paving the way for further experiments on the complex interactions of these phenomena .", "the results and analyses show that this proposed architecture reach state - of - the - art on two different established metaphor datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multiword expressions", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["multiword", "expressions"], "offsets": [30, 31]}], "trigger": {"text": "phenomena", "tokens": ["phenomena"], "offsets": [43]}}, {"event_type": "MDS", "arguments": [{"text": "neural architecture", "nugget_type": "FEA", "argument_type": 
"TriedComponent", "tokens": ["neural", "architecture"], "offsets": [80, 81]}, {"text": "analysing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analysing"], "offsets": [67]}], "trigger": {"text": "design", "tokens": ["design"], "offsets": [77]}}, {"event_type": "PUR", "arguments": [{"text": "interplay of metaphor and mwes processing", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["interplay", "of", "metaphor", "and", "multiword", "expressions", "processing"], "offsets": [69, 70, 71, 72, 30, 31, 74]}], "trigger": {"text": "analysing", "tokens": ["analysing"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["model"], "offsets": [91]}, {"text": "multiword expressions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiword", "expressions"], "offsets": [30, 31]}, {"text": "enhanced", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhanced"], "offsets": [87]}], "trigger": {"text": "informing", "tokens": ["informing"], "offsets": [89]}}, {"event_type": "FAC", "arguments": [{"text": "way", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["way"], "offsets": [119]}, {"text": "further experiments on the complex interactions of these phenomena", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["further", "experiments", "on", "the", "complex", "interactions", "of", "these", "phenomena"], "offsets": [121, 122, 123, 124, 125, 126, 127, 128, 129]}, {"text": "mwe - aware \u201d metaphor identification system", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["mwe", "-", "aware", "metaphor", "identification", "system"], "offsets": [110, 111, 112, 114, 115, 116]}], "trigger": {"text": "paving", "tokens": ["paving"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["state", "-", "of", "-", 
"the", "-", "art"], "offsets": [141, 142, 143, 144, 145, 146, 147]}, {"text": "two different established metaphor datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "different", "established", "metaphor", "datasets"], "offsets": [149, 150, 151, 152, 153]}, {"text": "proposed architecture", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["proposed", "architecture"], "offsets": [138, 139]}], "trigger": {"text": "reach", "tokens": ["reach"], "offsets": [140]}}, {"event_type": "PUR", "arguments": [{"text": "classification of metaphors", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["classification", "of", "metaphors"], "offsets": [83, 84, 85]}], "trigger": {"text": "enhanced", "tokens": ["enhanced"], "offsets": [87]}}], "document": ["metaphor", "is", "a", "linguistic", "device", "in", "which", "a", "concept", "is", "expressed", "by", "mentioning", "another", ".", "identifying", "metaphorical", "expressions", ",", "therefore", ",", "requires", "a", "non", "-", "compositional", "understanding", "of", "semantics", ".", "multiword", "expressions", "(", "mwes", ")", ",", "on", "the", "other", "hand", ",", "are", "linguistic", "phenomena", "with", "varying", "degrees", "of", "semantic", "opacity", "and", "their", "identification", "poses", "a", "challenge", "to", "computational", "models", ".", "this", "work", "is", "the", "first", "attempt", "at", "analysing", "the", "interplay", "of", "metaphor", "and", "mwes", "processing", "through", "the", "design", "of", "a", "neural", "architecture", "whereby", "classification", "of", "metaphors", "is", "enhanced", "by", "informing", "the", "model", "of", "the", "presence", "of", "mwes", ".", "to", "the", "best", "of", "our", "knowledge", ",", "this", "is", "the", "first", "\u201c", "mwe", "-", "aware", "\u201d", "metaphor", "identification", "system", "paving", "the", "way", "for", "further", "experiments", "on", "the", "complex", "interactions", "of", "these", "phenomena", ".", 
"the", "results", "and", "analyses", "show", "that", "this", "proposed", "architecture", "reach", "state", "-", "of", "-", "the", "-", "art", "on", "two", "different", "established", "metaphor", "datasets", "."]}, {"venue": "ACL", "title": "Inter-sentence Relation Extraction with Document-level Graph Convolutional Neural Network", "abstract": "Inter-sentence relation extraction deals with a number of complex semantic relationships in documents, which require local, non-local, syntactic and semantic dependencies. Existing methods do not fully exploit such dependencies. We present a novel inter-sentence relation extraction model that builds a labelled edge graph convolutional neural network model on a document-level graph. The graph is constructed using various inter- and intra-sentence dependencies to capture local and non-local dependency information. In order to predict the relation of an entity pair, we utilise multi-instance learning with bi-affine pairwise scoring. Experimental results show that our model achieves comparable performance to the state-of-the-art neural models on two biochemistry datasets. 
Our analysis shows that all the types in the graph are effective for inter-sentence relation extraction.", "doc_id": "38fc5443984f2bc1dd79651bed7e5920", "publication_year": 2019, "sentences": ["inter - sentence relation extraction deals with a number of complex semantic relationships in documents , which require local , non - local , syntactic and semantic dependencies .", "existing methods do not fully exploit such dependencies .", "we present a novel inter - sentence relation extraction model that builds a labelled edge graph convolutional neural network model on a document - level graph .", "the graph is constructed using various inter - and intra - sentence dependencies to capture local and non - local dependency information .", "in order to predict the relation of an entity pair , we utilise multi - instance learning with bi - affine pairwise scoring .", "experimental results show that our model achieves comparable performance to the state - of - the - art neural models on two biochemistry datasets .", "our analysis shows that all the types in the graph are effective for inter - sentence relation extraction ."], "events": [{"event_type": "ITT", "arguments": [{"text": "inter - sentence relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inter", "-", "sentence", "relation", "extraction"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "deals", "tokens": ["deals"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [29, 30]}, {"text": "not fully exploit", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "fully", "exploit"], "offsets": [32, 33, 34]}, {"text": "local , non - local , syntactic dependencies", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["local", ",", "non", "-", "local", ",", "syntactic", "dependencies"], "offsets": [18, 19, 20, 21, 22, 23, 24, 27]}], 
"trigger": {"text": "not fully exploit", "tokens": ["not", "fully", "exploit"], "offsets": [32, 33, 34]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [38]}, {"text": "inter - sentence relation extraction model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["inter", "-", "sentence", "relation", "extraction", "model"], "offsets": [42, 43, 44, 45, 46, 47]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [39]}}, {"event_type": "RWS", "arguments": [{"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [79]}, {"text": "graph", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["graph"], "offsets": [66]}, {"text": "inter - sentence dependencies", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["inter", "-", "sentence", "dependencies"], "offsets": [71, 72, 76, 77]}, {"text": "intra - sentence dependencies", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["intra", "-", "sentence", "dependencies"], "offsets": [74, 75, 76, 77]}], "trigger": {"text": "constructed", "tokens": ["constructed"], "offsets": [68]}}, {"event_type": "PUR", "arguments": [{"text": "local dependency information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["local", "dependency", "information"], "offsets": [80, 85, 86]}, {"text": "non - local dependency information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["non", "-", "local", "dependency", "information"], "offsets": [82, 83, 84, 85, 86]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "multi - instance learning", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["multi", "-", "instance", "learning"], "offsets": [101, 102, 103, 104]}, {"text": "bi - affine pairwise scoring", "nugget_type": "APP", "argument_type": 
"TriedComponent", "tokens": ["bi", "-", "affine", "pairwise", "scoring"], "offsets": [106, 107, 108, 109, 110]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [91]}], "trigger": {"text": "utilise", "tokens": ["utilise"], "offsets": [100]}}, {"event_type": "PUR", "arguments": [{"text": "relation of an entity pair", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relation", "of", "an", "entity", "pair"], "offsets": [93, 94, 95, 96, 97]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [91]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["achieves"], "offsets": [118]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "inter - sentence relation extraction model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["inter", "-", "sentence", "relation", "extraction", "model"], "offsets": [42, 43, 44, 45, 46, 47]}, {"text": "comparable performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable", "performance"], "offsets": [119, 120]}, {"text": "state - of - the - art neural models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "neural", "models"], "offsets": [123, 124, 125, 126, 127, 128, 129, 130, 131]}, {"text": "two biochemistry datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "biochemistry", "datasets"], "offsets": [133, 134, 135]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [118]}}, {"event_type": "FIN", "arguments": [{"text": "effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["effective"], "offsets": [148]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [139]}}, {"event_type": "FAC", "arguments": [{"text": "all the types in the graph", "nugget_type": "APP", 
"argument_type": "Subject", "tokens": ["all", "the", "types", "in", "the", "document", "-", "level", "graph"], "offsets": [141, 142, 143, 144, 145, 60, 61, 62, 63]}, {"text": "inter - sentence relation extraction", "nugget_type": "APP", "argument_type": "Object", "tokens": ["inter", "-", "sentence", "relation", "extraction"], "offsets": [150, 151, 152, 153, 154]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [148]}}], "document": ["inter", "-", "sentence", "relation", "extraction", "deals", "with", "a", "number", "of", "complex", "semantic", "relationships", "in", "documents", ",", "which", "require", "local", ",", "non", "-", "local", ",", "syntactic", "and", "semantic", "dependencies", ".", "existing", "methods", "do", "not", "fully", "exploit", "such", "dependencies", ".", "we", "present", "a", "novel", "inter", "-", "sentence", "relation", "extraction", "model", "that", "builds", "a", "labelled", "edge", "graph", "convolutional", "neural", "network", "model", "on", "a", "document", "-", "level", "graph", ".", "the", "graph", "is", "constructed", "using", "various", "inter", "-", "and", "intra", "-", "sentence", "dependencies", "to", "capture", "local", "and", "non", "-", "local", "dependency", "information", ".", "in", "order", "to", "predict", "the", "relation", "of", "an", "entity", "pair", ",", "we", "utilise", "multi", "-", "instance", "learning", "with", "bi", "-", "affine", "pairwise", "scoring", ".", "experimental", "results", "show", "that", "our", "model", "achieves", "comparable", "performance", "to", "the", "state", "-", "of", "-", "the", "-", "art", "neural", "models", "on", "two", "biochemistry", "datasets", ".", "our", "analysis", "shows", "that", "all", "the", "types", "in", "the", "graph", "are", "effective", "for", "inter", "-", "sentence", "relation", "extraction", "."]}, {"venue": "ACL", "title": "LNN-EL: A Neuro-Symbolic Approach to Short-text Entity Linking", "abstract": "Entity linking (EL) is the task of 
disambiguating mentions appearing in text by linking them to entities in a knowledge graph, a crucial task for text understanding, question answering or conversational systems. In the special case of short-text EL, which poses additional challenges due to limited context, prior approaches have reached good performance by employing heuristics-based methods or purely neural approaches. Here, we take a different, neuro-symbolic approach that combines the advantages of using interpretable rules based on first-order logic with the performance of neural learning. Even though constrained to use rules, we show that we reach competitive or better performance with SoTA black-box neural approaches. Furthermore, our framework has the benefits of extensibility and transferability. We show that we can easily blend existing rule templates given by a human expert, with multiple types of features (priors, BERT encodings, box embeddings, etc), and even with scores resulting from previous EL methods, thus improving on such methods. As an example of improvement, on the LC-QuAD-1.0 dataset, we show more than 3% increase in F1 score relative to previous SoTA. 
Finally, we show that the inductive bias offered by using logic results in a set of learned rules that transfers from one dataset to another, sometimes without finetuning, while still having high accuracy.", "doc_id": "ef29ab5c667119d6e64caae745600e13", "publication_year": 2021, "sentences": ["entity linking ( el ) is the task of disambiguating mentions appearing in text by linking them to entities in a knowledge graph , a crucial task for text understanding , question answering or conversational systems .", "in the special case of short - text el , which poses additional challenges due to limited context , prior approaches have reached good performance by employing heuristics - based methods or purely neural approaches .", "here , we take a different , neuro - symbolic approach that combines the advantages of using interpretable rules based on first - order logic with the performance of neural learning .", "even though constrained to use rules , we show that we reach competitive or better performance with sota black - box neural approaches .", "furthermore , our framework has the benefits of extensibility and transferability .", "we show that we can easily blend existing rule templates given by a human expert , with multiple types of features ( priors , bert encodings , box embeddings , etc ) , and even with scores resulting from previous el methods , thus improving on such methods .", "as an example of improvement , on the lc - quad - 1 . 
0 dataset , we show more than 3 % increase in f1 score relative to previous sota .", "finally , we show that the inductive bias offered by using logic results in a set of learned rules that transfers from one dataset to another , sometimes without finetuning , while still having high accuracy ."], "events": [{"event_type": "ITT", "arguments": [{"text": "entity linking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "linking"], "offsets": [0, 1]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [7]}}, {"event_type": "RWS", "arguments": [{"text": "prior approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prior", "approaches"], "offsets": [56, 57]}, {"text": "reached", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reached"], "offsets": [59]}, {"text": "heuristics - based methods", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["heuristics", "-", "based", "methods"], "offsets": [64, 65, 66, 67]}, {"text": "purely neural approaches", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["purely", "neural", "approaches"], "offsets": [69, 70, 71]}], "trigger": {"text": "employing", "tokens": ["employing"], "offsets": [63]}}, {"event_type": "PUR", "arguments": [{"text": "good performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["good", "performance"], "offsets": [60, 61]}], "trigger": {"text": "reached", "tokens": ["reached"], "offsets": [59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [75]}, {"text": "neuro - symbolic approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neuro", "-", "symbolic", "approach"], "offsets": [80, 81, 82, 83]}], "trigger": {"text": "take", "tokens": ["take"], "offsets": [76]}}, {"event_type": "MDS", "arguments": [{"text": "performance of neural learning", "nugget_type": "APP", "argument_type": "TriedComponent", 
"tokens": ["performance", "of", "neural", "learning"], "offsets": [100, 101, 102, 103]}, {"text": "interpretable rules", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["interpretable", "rules"], "offsets": [90, 91]}], "trigger": {"text": "combines", "tokens": ["combines"], "offsets": [85]}}, {"event_type": "FAC", "arguments": [{"text": "competitive or better performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["competitive", "or", "better", "performance"], "offsets": [117, 118, 119, 120]}, {"text": "sota black - box neural approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["sota", "black", "-", "box", "neural", "approaches"], "offsets": [122, 123, 124, 125, 126, 127]}], "trigger": {"text": "reach", "tokens": ["reach"], "offsets": [116]}}, {"event_type": "FAC", "arguments": [{"text": "framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["framework"], "offsets": [132]}, {"text": "benefits", "nugget_type": "STR", "argument_type": "Object", "tokens": ["benefits"], "offsets": [135]}, {"text": "extensibility", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["extensibility"], "offsets": [137]}, {"text": "transferability", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transferability"], "offsets": [139]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [133]}}, {"event_type": "MDS", "arguments": [{"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [185]}, {"text": "existing rule templates", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["existing", "rule", "templates"], "offsets": [148, 149, 150]}, {"text": "multiple types of features", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiple", "types", "of", "features"], "offsets": [158, 159, 160, 161]}, {"text": "scores", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["scores"], 
"offsets": [177]}], "trigger": {"text": "blend", "tokens": ["blend"], "offsets": [147]}}, {"event_type": "PUR", "arguments": [{"text": "such methods", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["such", "methods"], "offsets": [187, 188]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [185]}}, {"event_type": "CMP", "arguments": [{"text": "lc - quad - 1 . 0 dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["lc", "-", "quad", "-", "1", ".", "0", "dataset"], "offsets": [198, 199, 200, 201, 202, 203, 204, 205]}, {"text": "3 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["3", "%"], "offsets": [211, 212]}, {"text": "previous sota", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "sota"], "offsets": [219, 220]}, {"text": "f1 score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "score"], "offsets": [215, 216]}, {"text": "neuro - symbolic approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neuro", "-", "symbolic", "approach"], "offsets": [80, 81, 82, 83]}], "trigger": {"text": "increase", "tokens": ["increase"], "offsets": [213]}}, {"event_type": "FAC", "arguments": [{"text": "inductive bias", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["inductive", "bias"], "offsets": [228, 229]}, {"text": "without finetuning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "finetuning"], "offsets": [250, 251]}, {"text": "while still having high accuracy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "still", "having", "high", "accuracy"], "offsets": [253, 254, 255, 256, 257]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [225]}}, {"event_type": "MDS", "arguments": [{"text": "inductive bias", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["inductive", "bias"], "offsets": [228, 229]}, {"text": "logic results", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["logic", "results"], "offsets": [233, 234]}], "trigger": {"text": "offered", "tokens": ["offered"], "offsets": [230]}}], "document": ["entity", "linking", "(", "el", ")", "is", "the", "task", "of", "disambiguating", "mentions", "appearing", "in", "text", "by", "linking", "them", "to", "entities", "in", "a", "knowledge", "graph", ",", "a", "crucial", "task", "for", "text", "understanding", ",", "question", "answering", "or", "conversational", "systems", ".", "in", "the", "special", "case", "of", "short", "-", "text", "el", ",", "which", "poses", "additional", "challenges", "due", "to", "limited", "context", ",", "prior", "approaches", "have", "reached", "good", "performance", "by", "employing", "heuristics", "-", "based", "methods", "or", "purely", "neural", "approaches", ".", "here", ",", "we", "take", "a", "different", ",", "neuro", "-", "symbolic", "approach", "that", "combines", "the", "advantages", "of", "using", "interpretable", "rules", "based", "on", "first", "-", "order", "logic", "with", "the", "performance", "of", "neural", "learning", ".", "even", "though", "constrained", "to", "use", "rules", ",", "we", "show", "that", "we", "reach", "competitive", "or", "better", "performance", "with", "sota", "black", "-", "box", "neural", "approaches", ".", "furthermore", ",", "our", "framework", "has", "the", "benefits", "of", "extensibility", "and", "transferability", ".", "we", "show", "that", "we", "can", "easily", "blend", "existing", "rule", "templates", "given", "by", "a", "human", "expert", ",", "with", "multiple", "types", "of", "features", "(", "priors", ",", "bert", "encodings", ",", "box", "embeddings", ",", "etc", ")", ",", "and", "even", "with", "scores", "resulting", "from", "previous", "el", "methods", ",", "thus", "improving", "on", "such", "methods", ".", "as", "an", "example", "of", "improvement", ",", "on", "the", "lc", "-", "quad", "-", "1", ".", "0", "dataset", ",", "we", "show", "more", "than", "3", "%", "increase", 
"in", "f1", "score", "relative", "to", "previous", "sota", ".", "finally", ",", "we", "show", "that", "the", "inductive", "bias", "offered", "by", "using", "logic", "results", "in", "a", "set", "of", "learned", "rules", "that", "transfers", "from", "one", "dataset", "to", "another", ",", "sometimes", "without", "finetuning", ",", "while", "still", "having", "high", "accuracy", "."]}, {"venue": "ACL", "title": "ProtoTEx: Explaining Model Decisions with Prototype Tensors", "abstract": "We present ProtoTEx, a novel white-box NLP classification architecture based on prototype networks (Li et al., 2018). ProtoTEx faithfully explains model decisions based on prototype tensors that encode latent clusters of training examples. At inference time, classification decisions are based on the distances between the input text and the prototype tensors, explained via the training examples most similar to the most influential prototypes. We also describe a novel interleaved training algorithm that effectively handles classes characterized by ProtoTEx indicative features. On a propaganda detection task, ProtoTEx accuracy matches BART-large and exceeds BERTlarge with the added benefit of providing faithful explanations. A user study also shows that prototype-based explanations help non-experts to better recognize propaganda in online news.", "doc_id": "63e55ea9920a1ce855513098c59195e1", "publication_year": 2022, "sentences": ["we present prototex , a novel white - box nlp classification architecture based on prototype networks ( li et al . 
, 2018 ) .", "prototex faithfully explains model decisions based on prototype tensors that encode latent clusters of training examples .", "at inference time , classification decisions are based on the distances between the input text and the prototype tensors , explained via the training examples most similar to the most influential prototypes .", "we also describe a novel interleaved training algorithm that effectively handles classes characterized by prototex indicative features .", "on a propaganda detection task , prototex accuracy matches bart - large and exceeds bertlarge with the added benefit of providing faithful explanations .", "a user study also shows that prototype - based explanations help non - experts to better recognize propaganda in online news ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "white - box nlp classification architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["white", "-", "box", "nlp", "classification", "architecture"], "offsets": [6, 7, 8, 9, 10, 11]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [75]}, {"text": "interleaved training algorithm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["interleaved", "training", "algorithm"], "offsets": [80, 81, 82]}, {"text": "effectively handles", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["effectively", "handles"], "offsets": [84, 85]}], "trigger": {"text": "describe", "tokens": ["describe"], "offsets": [77]}}, {"event_type": "CMP", "arguments": [{"text": "bart - large", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bart", "-", "large"], "offsets": [102, 103, 104]}, {"text": "prototex", "nugget_type": "APP", "argument_type": "Arg1", "tokens": 
["prototex"], "offsets": [99]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["accuracy"], "offsets": [100]}], "trigger": {"text": "matches", "tokens": ["matches"], "offsets": [101]}}, {"event_type": "CMP", "arguments": [{"text": "accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["accuracy"], "offsets": [100]}, {"text": "prototex", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["prototex"], "offsets": [99]}, {"text": "exceeds", "nugget_type": "STR", "argument_type": "Result", "tokens": ["exceeds"], "offsets": [106]}], "trigger": {"text": "exceeds", "tokens": ["exceeds"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "classes characterized by prototex indicative features", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["classes", "characterized", "by", "prototex", "indicative", "features"], "offsets": [86, 87, 88, 89, 90, 91]}], "trigger": {"text": "effectively handles", "tokens": ["effectively", "handles"], "offsets": [84, 85]}}, {"event_type": "FAC", "arguments": [{"text": "prototex", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["prototex"], "offsets": [99]}, {"text": "faithful explanations", "nugget_type": "STR", "argument_type": "Object", "tokens": ["faithful", "explanations"], "offsets": [114, 115]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [113]}}, {"event_type": "FIN", "arguments": [{"text": "help", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["help"], "offsets": [127]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "prototype - based explanations", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["prototype", "-", "based", "explanations"], "offsets": [123, 124, 125, 126]}, {"text": "non - experts", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["non", "-", "experts"], "offsets": [128, 129, 130]}, {"text": 
"better recognize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "recognize"], "offsets": [132, 133]}], "trigger": {"text": "help", "tokens": ["help"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "propaganda in online news", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["propaganda", "in", "online", "news"], "offsets": [134, 135, 136, 137]}], "trigger": {"text": "better recognize", "tokens": ["better", "recognize"], "offsets": [132, 133]}}, {"event_type": "MDS", "arguments": [{"text": "training examples most similar to the most influential prototypes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["training", "examples", "most", "similar", "to", "the", "most", "influential", "prototypes"], "offsets": [65, 66, 67, 68, 69, 70, 71, 72, 73]}, {"text": "distances between the input text and the prototype tensors", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["distances", "between", "the", "input", "text", "and", "the", "prototype", "tensors"], "offsets": [52, 53, 54, 55, 56, 57, 58, 59, 60]}], "trigger": {"text": "explained", "tokens": ["explained"], "offsets": [62]}}], "document": ["we", "present", "prototex", ",", "a", "novel", "white", "-", "box", "nlp", "classification", "architecture", "based", "on", "prototype", "networks", "(", "li", "et", "al", ".", ",", "2018", ")", ".", "prototex", "faithfully", "explains", "model", "decisions", "based", "on", "prototype", "tensors", "that", "encode", "latent", "clusters", "of", "training", "examples", ".", "at", "inference", "time", ",", "classification", "decisions", "are", "based", "on", "the", "distances", "between", "the", "input", "text", "and", "the", "prototype", "tensors", ",", "explained", "via", "the", "training", "examples", "most", "similar", "to", "the", "most", "influential", "prototypes", ".", "we", "also", "describe", "a", "novel", "interleaved", "training", "algorithm", "that", "effectively", "handles", 
"classes", "characterized", "by", "prototex", "indicative", "features", ".", "on", "a", "propaganda", "detection", "task", ",", "prototex", "accuracy", "matches", "bart", "-", "large", "and", "exceeds", "bertlarge", "with", "the", "added", "benefit", "of", "providing", "faithful", "explanations", ".", "a", "user", "study", "also", "shows", "that", "prototype", "-", "based", "explanations", "help", "non", "-", "experts", "to", "better", "recognize", "propaganda", "in", "online", "news", "."]}, {"venue": "ACL", "title": "Generating Question Relevant Captions to Aid Visual Question Answering", "abstract": "Visual question answering (VQA) and image captioning require a shared body of general knowledge connecting language and vision. We present a novel approach to better VQA performance that exploits this connection by jointly generating captions that are targeted to help answer a specific visual question. The model is trained using an existing caption dataset by automatically determining question-relevant captions using an online gradient-based method. Experimental results on the VQA v2 challenge demonstrates that our approach obtains state-of-the-art VQA performance (e.g. 68.4% in the Test-standard set using a single model) by simultaneously generating question-relevant captions.", "doc_id": "7cb1c9038adfb42c5749a64991f4fff5", "publication_year": 2019, "sentences": ["visual question answering ( vqa ) and image captioning require a shared body of general knowledge connecting language and vision .", "we present a novel approach to better vqa performance that exploits this connection by jointly generating captions that are targeted to help answer a specific visual question .", "the model is trained using an existing caption dataset by automatically determining question - relevant captions using an online gradient - based method .", "experimental results on the vqa v2 challenge demonstrates that our approach obtains state - of - the - art vqa performance ( e . g . 68 . 
4 % in the test - standard set using a single model ) by simultaneously generating question - relevant captions ."], "events": [{"event_type": "ITT", "arguments": [{"text": "visual question answering ( vqa ) and image captioning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["visual", "question", "answering", "(", "vqa", ")", "and", "image", "captioning"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "require", "tokens": ["require"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [25]}, {"text": "better", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better"], "offsets": [27]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "vqa performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["vqa", "performance"], "offsets": [28, 29]}], "trigger": {"text": "better", "tokens": ["better"], "offsets": [27]}}, {"event_type": "MDS", "arguments": [{"text": "connection", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["connection"], "offsets": [33]}, {"text": "captions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["captions"], "offsets": [37]}], "trigger": {"text": "exploits", "tokens": ["exploits"], "offsets": [31]}}, {"event_type": "MDS", "arguments": [{"text": "existing caption dataset", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["existing", "caption", "dataset"], "offsets": [55, 56, 57]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [52]}}, {"event_type": "FIN", "arguments": [{"text": "obtains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtains"], "offsets": [84]}], "trigger": {"text": "demonstrates", "tokens": 
["demonstrates"], "offsets": [80]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [25]}, {"text": "state - of - the - art vqa performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "vqa", "performance"], "offsets": [85, 86, 87, 88, 89, 90, 91, 92, 93]}, {"text": "68 . 4 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["68", ".", "4", "%"], "offsets": [99, 100, 101, 102]}, {"text": "in the test - standard set using a single model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "test", "-", "standard", "set", "using", "a", "single", "model"], "offsets": [103, 104, 105, 106, 107, 108, 109, 110, 111, 112]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "question - relevant captions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["question", "-", "relevant", "captions"], "offsets": [61, 62, 63, 64]}, {"text": "online gradient - based method", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["online", "gradient", "-", "based", "method"], "offsets": [67, 68, 69, 70, 71]}], "trigger": {"text": "automatically determining", "tokens": ["automatically", "determining"], "offsets": [59, 60]}}], "document": ["visual", "question", "answering", "(", "vqa", ")", "and", "image", "captioning", "require", "a", "shared", "body", "of", "general", "knowledge", "connecting", "language", "and", "vision", ".", "we", "present", "a", "novel", "approach", "to", "better", "vqa", "performance", "that", "exploits", "this", "connection", "by", "jointly", "generating", "captions", "that", "are", "targeted", "to", "help", "answer", "a", "specific", "visual", "question", ".", "the", "model", "is", "trained", "using", "an", "existing", "caption", "dataset", "by", "automatically", 
"determining", "question", "-", "relevant", "captions", "using", "an", "online", "gradient", "-", "based", "method", ".", "experimental", "results", "on", "the", "vqa", "v2", "challenge", "demonstrates", "that", "our", "approach", "obtains", "state", "-", "of", "-", "the", "-", "art", "vqa", "performance", "(", "e", ".", "g", ".", "68", ".", "4", "%", "in", "the", "test", "-", "standard", "set", "using", "a", "single", "model", ")", "by", "simultaneously", "generating", "question", "-", "relevant", "captions", "."]}, {"venue": "ACL", "title": "Finding Your Voice: The Linguistic Development of Mental Health Counselors", "abstract": "Mental health counseling is an enterprise with profound societal importance where conversations play a primary role. In order to acquire the conversational skills needed to face a challenging range of situations, mental health counselors must rely on training and on continued experience with actual clients. However, in the absence of large scale longitudinal studies, the nature and significance of this developmental process remain unclear. For example, prior literature suggests that experience might not translate into consequential changes in counselor behavior. This has led some to even argue that counseling is a profession without expertise. In this work, we develop a computational framework to quantify the extent to which individuals change their linguistic behavior with experience and to study the nature of this evolution. We use our framework to conduct a large longitudinal study of mental health counseling conversations, tracking over 3,400 counselors across their tenure. We reveal that overall, counselors do indeed change their conversational behavior to become more diverse across interactions, developing an individual voice that distinguishes them from other counselors. 
Furthermore, a finer-grained investigation shows that the rate and nature of this diversification vary across functionally different conversational components.", "doc_id": "43b0500c7f95053ce98441ffb4825c2e", "publication_year": 2019, "sentences": ["mental health counseling is an enterprise with profound societal importance where conversations play a primary role .", "in order to acquire the conversational skills needed to face a challenging range of situations , mental health counselors must rely on training and on continued experience with actual clients .", "however , in the absence of large scale longitudinal studies , the nature and significance of this developmental process remain unclear .", "for example , prior literature suggests that experience might not translate into consequential changes in counselor behavior .", "this has led some to even argue that counseling is a profession without expertise .", "in this work , we develop a computational framework to quantify the extent to which individuals change their linguistic behavior with experience and to study the nature of this evolution .", "we use our framework to conduct a large longitudinal study of mental health counseling conversations , tracking over 3 , 400 counselors across their tenure .", "we reveal that overall , counselors do indeed change their conversational behavior to become more diverse across interactions , developing an individual voice that distinguishes them from other counselors .", "furthermore , a finer - grained investigation shows that the rate and nature of this diversification vary across functionally different conversational components ."], "events": [{"event_type": "ITT", "arguments": [{"text": "mental health counseling", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["mental", "health", "counseling"], "offsets": [0, 1, 2]}], "trigger": {"text": "enterprise", "tokens": ["enterprise"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [107]}, {"text": "computational framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["computational", "framework"], "offsets": [110, 111]}, {"text": "quantify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quantify"], "offsets": [113]}, {"text": "study", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["study"], "offsets": [127]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [108]}}, {"event_type": "PUR", "arguments": [{"text": "extent", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["extent"], "offsets": [115]}], "trigger": {"text": "quantify", "tokens": ["quantify"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "nature of this evolution", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nature", "of", "this", "evolution"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [127]}}, {"event_type": "WKS", "arguments": [{"text": "computational framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["computational", "framework"], "offsets": [110, 111]}, {"text": "conduct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["conduct"], "offsets": [139]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [134]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [135]}}, {"event_type": "PUR", "arguments": [{"text": "large longitudinal study of mental health counseling conversations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["large", "longitudinal", "study", "of", "mental", "health", "counseling", "conversations"], "offsets": [141, 142, 143, 144, 145, 146, 147, 148]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [139]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", 
"tokens": ["we"], "offsets": [134]}, {"text": "over 3 , 400 counselors", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["over", "3", ",", "400", "counselors"], "offsets": [151, 152, 153, 154, 155]}, {"text": "across their tenure", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "over", "3", ",", "400", "counselors", "tenure"], "offsets": [156, 151, 152, 153, 154, 155, 158]}], "trigger": {"text": "tracking", "tokens": ["tracking"], "offsets": [150]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [160]}, {"text": "change", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["change"], "offsets": [168]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "conversational behavior", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["conversational", "behavior"], "offsets": [170, 171]}, {"text": "indeed", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["indeed"], "offsets": [167]}, {"text": "counselors", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["counselors"], "offsets": [165]}], "trigger": {"text": "change", "tokens": ["change"], "offsets": [168]}}, {"event_type": "CMP", "arguments": [{"text": "across interactions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "interactions"], "offsets": [176, 177]}, {"text": "counselors", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["counselors"], "offsets": [165]}, {"text": "conversational behavior to become more diverse", "nugget_type": "STR", "argument_type": "Result", "tokens": ["conversational", "behavior", "to", "become", "more", "diverse"], "offsets": [170, 171, 172, 173, 174, 175]}], "trigger": {"text": "change", "tokens": ["change"], "offsets": [168]}}, {"event_type": "FAC", "arguments": [{"text": "individual voice", "nugget_type": "TAK", 
"argument_type": "Object", "tokens": ["individual", "voice"], "offsets": [181, 182]}, {"text": "counselors", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["counselors"], "offsets": [165]}], "trigger": {"text": "developing", "tokens": ["developing"], "offsets": [179]}}, {"event_type": "FIN", "arguments": [{"text": "vary", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["vary"], "offsets": [206]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [197]}}, {"event_type": "FAC", "arguments": [{"text": "across functionally different conversational components", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "functionally", "different", "conversational", "components"], "offsets": [207, 208, 209, 210, 211]}, {"text": "rate of this diversification", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["rate", "of", "this", "diversification"], "offsets": [200, 203, 204, 205]}, {"text": "nature of this diversification", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["nature", "of", "this", "diversification"], "offsets": [202, 203, 204, 205]}], "trigger": {"text": "vary", "tokens": ["vary"], "offsets": [206]}}], "document": ["mental", "health", "counseling", "is", "an", "enterprise", "with", "profound", "societal", "importance", "where", "conversations", "play", "a", "primary", "role", ".", "in", "order", "to", "acquire", "the", "conversational", "skills", "needed", "to", "face", "a", "challenging", "range", "of", "situations", ",", "mental", "health", "counselors", "must", "rely", "on", "training", "and", "on", "continued", "experience", "with", "actual", "clients", ".", "however", ",", "in", "the", "absence", "of", "large", "scale", "longitudinal", "studies", ",", "the", "nature", "and", "significance", "of", "this", "developmental", "process", "remain", "unclear", ".", "for", "example", ",", "prior", "literature", "suggests", "that", "experience", "might", "not", "translate", "into", 
"consequential", "changes", "in", "counselor", "behavior", ".", "this", "has", "led", "some", "to", "even", "argue", "that", "counseling", "is", "a", "profession", "without", "expertise", ".", "in", "this", "work", ",", "we", "develop", "a", "computational", "framework", "to", "quantify", "the", "extent", "to", "which", "individuals", "change", "their", "linguistic", "behavior", "with", "experience", "and", "to", "study", "the", "nature", "of", "this", "evolution", ".", "we", "use", "our", "framework", "to", "conduct", "a", "large", "longitudinal", "study", "of", "mental", "health", "counseling", "conversations", ",", "tracking", "over", "3", ",", "400", "counselors", "across", "their", "tenure", ".", "we", "reveal", "that", "overall", ",", "counselors", "do", "indeed", "change", "their", "conversational", "behavior", "to", "become", "more", "diverse", "across", "interactions", ",", "developing", "an", "individual", "voice", "that", "distinguishes", "them", "from", "other", "counselors", ".", "furthermore", ",", "a", "finer", "-", "grained", "investigation", "shows", "that", "the", "rate", "and", "nature", "of", "this", "diversification", "vary", "across", "functionally", "different", "conversational", "components", "."]}, {"venue": "ACL", "title": "Speak to your Parser: Interactive Text-to-SQL with Natural Language Feedback", "abstract": "We study the task of semantic parse correction with natural language feedback. Given a natural language utterance, most semantic parsing systems pose the problem as one-shot translation where the utterance is mapped to a corresponding logical form. In this paper, we investigate a more interactive scenario where humans can further interact with the system by providing free-form natural language feedback to correct the system when it generates an inaccurate interpretation of an initial utterance. 
We focus on natural language to SQL systems and construct, SPLASH, a dataset of utterances, incorrect SQL interpretations and the corresponding natural language feedback. We compare various reference models for the correction task and show that incorporating such a rich form of feedback can significantly improve the overall semantic parsing accuracy while retaining the flexibility of natural language interaction. While we estimated human correction accuracy is 81.5%, our best model achieves only 25.1%, which leaves a large gap for improvement in future research. SPLASH is publicly available at https://aka.ms/Splash_dataset.", "doc_id": "435cd3d22bfa0f9ad55eb40d8491d2c9", "publication_year": 2020, "sentences": ["we study the task of semantic parse correction with natural language feedback .", "given a natural language utterance , most semantic parsing systems pose the problem as one - shot translation where the utterance is mapped to a corresponding logical form .", "in this paper , we investigate a more interactive scenario where humans can further interact with the system by providing free - form natural language feedback to correct the system when it generates an inaccurate interpretation of an initial utterance .", "we focus on natural language to sql systems and construct , splash , a dataset of utterances , incorrect sql interpretations and the corresponding natural language feedback .", "we compare various reference models for the correction task and show that incorporating such a rich form of feedback can significantly improve the overall semantic parsing accuracy while retaining the flexibility of natural language interaction .", "while we estimated human correction accuracy is 81 . 5 % , our best model achieves only 25 . 1 % , which leaves a large gap for improvement in future research .", "splash is publicly available at https : / / aka . 
ms / splash _ dataset ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "task of semantic parse correction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "of", "semantic", "parse", "correction"], "offsets": [3, 4, 5, 6, 7]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [1]}}, {"event_type": "RWS", "arguments": [{"text": "semantic parsing systems", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["semantic", "parsing", "systems"], "offsets": [20, 21, 22]}, {"text": "utterance", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["utterance"], "offsets": [33]}, {"text": "corresponding logical form", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["corresponding", "logical", "form"], "offsets": [38, 39, 40]}], "trigger": {"text": "mapped", "tokens": ["mapped"], "offsets": [35]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "more interactive scenario", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["more", "interactive", "scenario"], "offsets": [49, 50, 51]}, {"text": "where humans can further interact with the system", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "humans", "can", "further", "interact", "with", "the", "system"], "offsets": [52, 53, 54, 55, 56, 57, 58, 59]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [47]}}, {"event_type": "MDS", "arguments": [{"text": "correct the system when it generates an inaccurate interpretation of an initial utterance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["correct", "the", "system", "when", "system", "generates", "an", "inaccurate", "interpretation", "of", "an", "initial", "utterance"], "offsets": [69, 70, 71, 72, 71, 74, 75, 76, 77, 78, 79, 
80, 81]}, {"text": "free - form natural language feedback", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["free", "-", "form", "natural", "language", "feedback"], "offsets": [62, 63, 64, 65, 66, 67]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [61]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "natural language to sql systems", "nugget_type": "APP", "argument_type": "Content", "tokens": ["natural", "language", "to", "sql", "systems"], "offsets": [86, 87, 88, 89, 90]}], "trigger": {"text": "focus on", "tokens": ["focus", "on"], "offsets": [84, 85]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [83]}, {"text": "dataset of utterances", "nugget_type": "DST", "argument_type": "Content", "tokens": ["dataset", "of", "utterances"], "offsets": [97, 98, 99]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [111]}, {"text": "various reference models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["various", "reference", "models"], "offsets": [113, 114, 115]}, {"text": "correction task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["correction", "task"], "offsets": [118, 119]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "overall semantic parsing accuracy", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["overall", "semantic", "parsing", "accuracy"], "offsets": [134, 135, 136, 137]}, {"text": "rich form of feedback", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["rich", "form", "of", "feedback"], "offsets": [126, 127, 128, 129]}, {"text": 
"significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [131]}, {"text": "while retaining the flexibility of natural language interaction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "retaining", "the", "flexibility", "of", "natural", "language", "interaction"], "offsets": [138, 139, 140, 141, 142, 143, 144, 145]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "best model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "model"], "offsets": [160, 161]}, {"text": "only 25 . 1 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["only", "25", ".", "1", "%"], "offsets": [163, 164, 165, 166, 167]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [162]}}, {"event_type": "PUR", "arguments": [{"text": "system", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["system"], "offsets": [71]}, {"text": "when it generates an inaccurate interpretation of an initial utterance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "system", "generates", "an", "inaccurate", "interpretation", "of", "an", "initial", "utterance"], "offsets": [72, 71, 74, 75, 76, 77, 78, 79, 80, 81]}], "trigger": {"text": "correct", "tokens": ["correct"], "offsets": [69]}}], "document": ["we", "study", "the", "task", "of", "semantic", "parse", "correction", "with", "natural", "language", "feedback", ".", "given", "a", "natural", "language", "utterance", ",", "most", "semantic", "parsing", "systems", "pose", "the", "problem", "as", "one", "-", "shot", "translation", "where", "the", "utterance", "is", "mapped", "to", "a", "corresponding", "logical", "form", ".", "in", "this", "paper", ",", "we", "investigate", "a", "more", "interactive", "scenario", "where", "humans", "can", "further", "interact", "with", "the", "system", "by", "providing", "free", "-", "form", "natural", 
"language", "feedback", "to", "correct", "the", "system", "when", "it", "generates", "an", "inaccurate", "interpretation", "of", "an", "initial", "utterance", ".", "we", "focus", "on", "natural", "language", "to", "sql", "systems", "and", "construct", ",", "splash", ",", "a", "dataset", "of", "utterances", ",", "incorrect", "sql", "interpretations", "and", "the", "corresponding", "natural", "language", "feedback", ".", "we", "compare", "various", "reference", "models", "for", "the", "correction", "task", "and", "show", "that", "incorporating", "such", "a", "rich", "form", "of", "feedback", "can", "significantly", "improve", "the", "overall", "semantic", "parsing", "accuracy", "while", "retaining", "the", "flexibility", "of", "natural", "language", "interaction", ".", "while", "we", "estimated", "human", "correction", "accuracy", "is", "81", ".", "5", "%", ",", "our", "best", "model", "achieves", "only", "25", ".", "1", "%", ",", "which", "leaves", "a", "large", "gap", "for", "improvement", "in", "future", "research", ".", "splash", "is", "publicly", "available", "at", "https", ":", "/", "/", "aka", ".", "ms", "/", "splash", "_", "dataset", "."]}, {"venue": "ACL", "title": "Triangular Transfer: Freezing the Pivot for Triangular Machine Translation", "abstract": "Triangular machine translation is a special case of low-resource machine translation where the language pair of interest has limited parallel data, but both languages have abundant parallel data with a pivot language. Naturally, the key to triangular machine translation is the successful exploitation of such auxiliary data. In this work, we propose a transfer-learning-based approach that utilizes all types of auxiliary data. 
As we train auxiliary source-pivot and pivot-target translation models, we initialize some parameters of the pivot side with a pre-trained language model and freeze them to encourage both translation models to work in the same pivot language space, so that they can be smoothly transferred to the source-target translation model. Experiments show that our approach can outperform previous ones.", "doc_id": "ff648381ad43fd5cbcb7a29499c5331b", "publication_year": 2022, "sentences": ["triangular machine translation is a special case of low - resource machine translation where the language pair of interest has limited parallel data , but both languages have abundant parallel data with a pivot language .", "naturally , the key to triangular machine translation is the successful exploitation of such auxiliary data .", "in this work , we propose a transfer - learning - based approach that utilizes all types of auxiliary data .", "as we train auxiliary source - pivot and pivot - target translation models , we initialize some parameters of the pivot side with a pre - trained language model and freeze them to encourage both translation models to work in the same pivot language space , so that they can be smoothly transferred to the source - target translation model .", "experiments show that our approach can outperform previous ones ."], "events": [{"event_type": "ITT", "arguments": [{"text": "triangular machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["triangular", "machine", "translation"], "offsets": [0, 1, 2]}], "trigger": {"text": "special case of low - resource machine translation", "tokens": ["special", "case", "of", "low", "-", "resource", "machine", "translation"], "offsets": [5, 6, 7, 8, 9, 10, 11, 12]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "transfer - learning - based approach", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["transfer", "-", "learning", "-", "based", "approach"], "offsets": [60, 61, 62, 63, 64, 65]}, {"text": "utilizes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["utilizes"], "offsets": [67]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "PUR", "arguments": [{"text": "all types of auxiliary data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["all", "types", "of", "auxiliary", "data"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "utilizes", "tokens": ["utilizes"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "as we train auxiliary source - pivot and pivot - target translation models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "we", "train", "auxiliary", "source", "-", "pivot", "and", "pivot", "-", "target", "translation", "models"], "offsets": [74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86]}, {"text": "with a pre - trained language model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "a", "pre", "-", "trained", "language", "model"], "offsets": [96, 97, 98, 99, 100, 101, 102]}, {"text": "parameters of the pivot side", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["parameters", "of", "the", "pivot", "side"], "offsets": [91, 92, 93, 94, 95]}], "trigger": {"text": "initialize", "tokens": ["initialize"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "encourage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encourage"], "offsets": [107]}, {"text": "parameters of the pivot side", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["parameters", "of", "the", "pivot", "side"], "offsets": [91, 92, 93, 94, 95]}], "trigger": {"text": "freeze", "tokens": ["freeze"], "offsets": [104]}}, {"event_type": "PUR", "arguments": [{"text": "translation models", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["translation", 
"models"], "offsets": [109, 110]}, {"text": "to work in the same pivot language space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "work", "in", "the", "same", "pivot", "language", "space"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118]}], "trigger": {"text": "encourage", "tokens": ["encourage"], "offsets": [107]}}, {"event_type": "MDS", "arguments": [{"text": "source - target translation model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["source", "-", "target", "translation", "model"], "offsets": [129, 130, 131, 132, 133]}, {"text": "parameters of the pivot side", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["parameters", "of", "the", "pivot", "side"], "offsets": [91, 92, 93, 94, 95]}], "trigger": {"text": "transferred", "tokens": ["transferred"], "offsets": [126]}}, {"event_type": "FIN", "arguments": [{"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [141]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [136]}}, {"event_type": "CMP", "arguments": [{"text": "transfer - learning - based approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["transfer", "-", "learning", "-", "based", "approach"], "offsets": [60, 61, 62, 63, 64, 65]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [141]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [141]}}], "document": ["triangular", "machine", "translation", "is", "a", "special", "case", "of", "low", "-", "resource", "machine", "translation", "where", "the", "language", "pair", "of", "interest", "has", "limited", "parallel", "data", ",", "but", "both", "languages", "have", "abundant", "parallel", "data", "with", "a", "pivot", "language", ".", "naturally", ",", "the", "key", "to", "triangular", "machine", "translation", "is", "the", "successful", "exploitation", "of", "such", 
"auxiliary", "data", ".", "in", "this", "work", ",", "we", "propose", "a", "transfer", "-", "learning", "-", "based", "approach", "that", "utilizes", "all", "types", "of", "auxiliary", "data", ".", "as", "we", "train", "auxiliary", "source", "-", "pivot", "and", "pivot", "-", "target", "translation", "models", ",", "we", "initialize", "some", "parameters", "of", "the", "pivot", "side", "with", "a", "pre", "-", "trained", "language", "model", "and", "freeze", "them", "to", "encourage", "both", "translation", "models", "to", "work", "in", "the", "same", "pivot", "language", "space", ",", "so", "that", "they", "can", "be", "smoothly", "transferred", "to", "the", "source", "-", "target", "translation", "model", ".", "experiments", "show", "that", "our", "approach", "can", "outperform", "previous", "ones", "."]}, {"venue": "ACL", "title": "Syntax-Enhanced Pre-trained Model", "abstract": "We study the problem of leveraging the syntactic structure of text to enhance pre-trained models such as BERT and RoBERTa. Existing methods utilize syntax of text either in the pre-training stage or in the fine-tuning stage, so that they suffer from discrepancy between the two stages. Such a problem would lead to the necessity of having human-annotated syntactic information, which limits the application of existing methods to broader scenarios. To address this, we present a model that utilizes the syntax of text in both pre-training and fine-tuning stages. Our model is based on Transformer with a syntax-aware attention layer that considers the dependency tree of the text. We further introduce a new pre-training task of predicting the syntactic distance among tokens in the dependency tree. We evaluate the model on three downstream tasks, including relation classification, entity typing, and question answering. Results show that our model achieves state-of-the-art performance on six public benchmark datasets. We have two major findings. 
First, we demonstrate that infusing automatically produced syntax of text improves pre-trained models. Second, global syntactic distances among tokens bring larger performance gains compared to local head relations between contiguous tokens.", "doc_id": "f47cd5e3d0618bf88e3562a95793639f", "publication_year": 2021, "sentences": ["we study the problem of leveraging the syntactic structure of text to enhance pre - trained models such as bert and roberta .", "existing methods utilize syntax of text either in the pre - training stage or in the fine - tuning stage , so that they suffer from discrepancy between the two stages .", "such a problem would lead to the necessity of having human - annotated syntactic information , which limits the application of existing methods to broader scenarios .", "to address this , we present a model that utilizes the syntax of text in both pre - training and fine - tuning stages .", "our model is based on transformer with a syntax - aware attention layer that considers the dependency tree of the text .", "we further introduce a new pre - training task of predicting the syntactic distance among tokens in the dependency tree .", "we evaluate the model on three downstream tasks , including relation classification , entity typing , and question answering .", "results show that our model achieves state - of - the - art performance on six public benchmark datasets .", "we have two major findings .", "first , we demonstrate that infusing automatically produced syntax of text improves pre - trained models .", "second , global syntactic distances among tokens bring larger performance gains compared to local head relations between contiguous tokens ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pre - trained models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pre", "-", "trained", "models"], "offsets": [13, 14, 15, 16]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [12]}}, {"event_type": "RWS", 
"arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "methods"], "offsets": [23, 24]}, {"text": "syntax of text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["syntax", "of", "text"], "offsets": [26, 27, 28]}, {"text": "either in the pre - training stage or in the fine - tuning stage", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["either", "in", "the", "pre", "-", "training", "stage", "or", "in", "the", "fine", "-", "tuning", "stage"], "offsets": [29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42]}], "trigger": {"text": "utilize", "tokens": ["utilize"], "offsets": [25]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods utilize", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods", "utilize"], "offsets": [23, 24, 25]}, {"text": "discrepancy between the two stages", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["discrepancy", "between", "the", "two", "stages"], "offsets": [49, 50, 51, 52, 53]}], "trigger": {"text": "suffer", "tokens": ["suffer"], "offsets": [47]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [129]}, {"text": "pre - training task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["pre", "-", "training", "task"], "offsets": [134, 135, 136, 137]}, {"text": "predicting", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predicting"], "offsets": [139]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [150]}, {"text": "on three downstream tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "downstream", "tasks"], "offsets": [154, 155, 156, 157]}, {"text": "model", "nugget_type": "APP", 
"argument_type": "Content", "tokens": ["model"], "offsets": [153]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [151]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [175]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [171]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [176, 177, 178, 179, 180, 181, 182, 183]}, {"text": "six public benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["six", "public", "benchmark", "datasets"], "offsets": [185, 186, 187, 188]}, {"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [174]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [175]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [198]}, {"text": "improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improves"], "offsets": [207]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [199]}}, {"event_type": "FAC", "arguments": [{"text": "pre - trained models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["pre", "-", "trained", "models"], "offsets": [208, 209, 210, 211]}, {"text": "syntax of text", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["syntax", "of", "text"], "offsets": [204, 205, 206]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [207]}}, {"event_type": "CMP", "arguments": [{"text": "larger performance gains", "nugget_type": "STR", "argument_type": "Result", "tokens": ["larger", "performance", "gains"], "offsets": [221, 222, 223]}, {"text": "global syntactic distances among tokens", 
"nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["global", "syntactic", "distances", "among", "tokens"], "offsets": [215, 216, 217, 218, 219]}, {"text": "local head relations between contiguous tokens", "nugget_type": "FEA", "argument_type": "Arg2", "tokens": ["local", "head", "relations", "between", "contiguous", "tokens"], "offsets": [226, 227, 228, 229, 230, 231]}], "trigger": {"text": "bring", "tokens": ["bring"], "offsets": [220]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [86]}, {"text": "syntax of text", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["syntax", "of", "text"], "offsets": [93, 94, 95]}, {"text": "in both pre - training and fine - tuning stages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "both", "pre", "-", "training", "and", "fine", "-", "tuning", "stages"], "offsets": [96, 97, 98, 99, 100, 101, 102, 103, 104, 105]}], "trigger": {"text": "utilizes", "tokens": ["utilizes"], "offsets": [91]}}, {"event_type": "PUR", "arguments": [{"text": "syntactic distance among tokens", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["syntactic", "distance", "among", "tokens"], "offsets": [141, 142, 143, 144]}, {"text": "in the dependency tree", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "dependency", "tree"], "offsets": [145, 146, 147, 148]}], "trigger": {"text": "predicting", "tokens": ["predicting"], "offsets": [139]}}], "document": ["we", "study", "the", "problem", "of", "leveraging", "the", "syntactic", "structure", "of", "text", "to", "enhance", "pre", "-", "trained", "models", "such", "as", "bert", "and", "roberta", ".", "existing", "methods", "utilize", "syntax", "of", "text", "either", "in", "the", "pre", "-", "training", "stage", "or", "in", "the", "fine", "-", "tuning", "stage", ",", "so", "that", "they", "suffer", "from", "discrepancy", "between", "the", "two", 
"stages", ".", "such", "a", "problem", "would", "lead", "to", "the", "necessity", "of", "having", "human", "-", "annotated", "syntactic", "information", ",", "which", "limits", "the", "application", "of", "existing", "methods", "to", "broader", "scenarios", ".", "to", "address", "this", ",", "we", "present", "a", "model", "that", "utilizes", "the", "syntax", "of", "text", "in", "both", "pre", "-", "training", "and", "fine", "-", "tuning", "stages", ".", "our", "model", "is", "based", "on", "transformer", "with", "a", "syntax", "-", "aware", "attention", "layer", "that", "considers", "the", "dependency", "tree", "of", "the", "text", ".", "we", "further", "introduce", "a", "new", "pre", "-", "training", "task", "of", "predicting", "the", "syntactic", "distance", "among", "tokens", "in", "the", "dependency", "tree", ".", "we", "evaluate", "the", "model", "on", "three", "downstream", "tasks", ",", "including", "relation", "classification", ",", "entity", "typing", ",", "and", "question", "answering", ".", "results", "show", "that", "our", "model", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "six", "public", "benchmark", "datasets", ".", "we", "have", "two", "major", "findings", ".", "first", ",", "we", "demonstrate", "that", "infusing", "automatically", "produced", "syntax", "of", "text", "improves", "pre", "-", "trained", "models", ".", "second", ",", "global", "syntactic", "distances", "among", "tokens", "bring", "larger", "performance", "gains", "compared", "to", "local", "head", "relations", "between", "contiguous", "tokens", "."]}, {"venue": "ACL", "title": "SherLIiC: A Typed Event-Focused Lexical Inference Benchmark for Evaluating Natural Language Inference", "abstract": "We present SherLIiC, a testbed for lexical inference in context (LIiC), consisting of 3985 manually annotated inference rule candidates (InfCands), accompanied by (i) ~960k unlabeled InfCands, and (ii) ~190k typed textual relations between Freebase entities 
extracted from the large entity-linked corpus ClueWeb09. Each InfCand consists of one of these relations, expressed as a lemmatized dependency path, and two argument placeholders, each linked to one or more Freebase types. Due to our candidate selection process based on strong distributional evidence, SherLIiC is much harder than existing testbeds because distributional evidence is of little utility in the classification of InfCands. We also show that, due to its construction, many of SherLIiC\u2019s correct InfCands are novel and missing from existing rule bases. We evaluate a large number of strong baselines on SherLIiC, ranging from semantic vector space models to state of the art neural models of natural language inference (NLI). We show that SherLIiC poses a tough challenge to existing NLI systems.", "doc_id": "a737a52ac1b3dcc219bd2fc3a5d1b876", "publication_year": 2019, "sentences": ["we present sherliic , a testbed for lexical inference in context ( liic ) , consisting of 3985 manually annotated inference rule candidates ( infcands ) , accompanied by ( i ) ~ 960k unlabeled infcands , and ( ii ) ~ 190k typed textual relations between freebase entities extracted from the large entity - linked corpus clueweb09 .", "each infcand consists of one of these relations , expressed as a lemmatized dependency path , and two argument placeholders , each linked to one or more freebase types .", "due to our candidate selection process based on strong distributional evidence , sherliic is much harder than existing testbeds because distributional evidence is of little utility in the classification of infcands .", "we also show that , due to its construction , many of sherliic \u2019 s correct infcands are novel and missing from existing rule bases .", "we evaluate a large number of strong baselines on sherliic , ranging from semantic vector space models to state of the art neural models of natural language inference ( nli ) .", "we show that sherliic poses a tough challenge 
to existing nli systems ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "sherliic", "nugget_type": "APP", "argument_type": "Content", "tokens": ["sherliic"], "offsets": [2]}, {"text": "lexical inference", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["lexical", "inference"], "offsets": [7, 8]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "argument placeholders", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["argument", "placeholders"], "offsets": [77, 78]}, {"text": "lemmatized dependency path", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["lemmatized", "dependency", "path"], "offsets": [71, 72, 73]}, {"text": "one or more freebase types", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["one", "or", "more", "freebase", "types"], "offsets": [83, 84, 85, 86, 87]}], "trigger": {"text": "linked", "tokens": ["linked"], "offsets": [81]}}, {"event_type": "CMP", "arguments": [{"text": "sherliic", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["sherliic"], "offsets": [101]}, {"text": "existing testbeds", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "testbeds"], "offsets": [106, 107]}, {"text": "much", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["much"], "offsets": [103]}], "trigger": {"text": "harder", "tokens": ["harder"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "sherliic \u2019 s correct infcands", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["sherliic", "\u2019", "s", "correct", "infcands"], "offsets": [133, 134, 135, 136, 137]}, {"text": "from existing rule bases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "existing", "rule", "bases"], "offsets": [142, 143, 144, 145]}], "trigger": {"text": 
"novel and missing", "tokens": ["novel", "and", "missing"], "offsets": [139, 140, 141]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [147]}, {"text": "large number of strong baselines", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["large", "number", "of", "strong", "baselines"], "offsets": [150, 151, 152, 153, 154]}, {"text": "on sherliic", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "sherliic"], "offsets": [155, 156]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [148]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [179]}, {"text": "poses", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["poses"], "offsets": [183]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [180]}}, {"event_type": "FAC", "arguments": [{"text": "sherliic", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["sherliic"], "offsets": [182]}, {"text": "tough challenge", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["tough", "challenge"], "offsets": [185, 186]}], "trigger": {"text": "poses", "tokens": ["poses"], "offsets": [183]}}, {"event_type": "MDS", "arguments": [{"text": "960k unlabeled infcands", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["960k", "unlabeled", "infcands"], "offsets": [33, 34, 35]}, {"text": "190k typed textual relations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["190k", "typed", "textual", "relations"], "offsets": [42, 43, 44, 45]}, {"text": "large entity - linked corpus clueweb09", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["large", "entity", "-", "linked", "corpus", "clueweb09"], "offsets": [52, 53, 54, 55, 56, 57]}, {"text": "between freebase entities", "nugget_type": "LIM", "argument_type": "Condition", "tokens": 
["between", "freebase", "entities"], "offsets": [46, 47, 48]}], "trigger": {"text": "extracted", "tokens": ["extracted"], "offsets": [49]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [121]}, {"text": "novel and missing", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["novel", "and", "missing"], "offsets": [139, 140, 141]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [123]}}], "document": ["we", "present", "sherliic", ",", "a", "testbed", "for", "lexical", "inference", "in", "context", "(", "liic", ")", ",", "consisting", "of", "3985", "manually", "annotated", "inference", "rule", "candidates", "(", "infcands", ")", ",", "accompanied", "by", "(", "i", ")", "~", "960k", "unlabeled", "infcands", ",", "and", "(", "ii", ")", "~", "190k", "typed", "textual", "relations", "between", "freebase", "entities", "extracted", "from", "the", "large", "entity", "-", "linked", "corpus", "clueweb09", ".", "each", "infcand", "consists", "of", "one", "of", "these", "relations", ",", "expressed", "as", "a", "lemmatized", "dependency", "path", ",", "and", "two", "argument", "placeholders", ",", "each", "linked", "to", "one", "or", "more", "freebase", "types", ".", "due", "to", "our", "candidate", "selection", "process", "based", "on", "strong", "distributional", "evidence", ",", "sherliic", "is", "much", "harder", "than", "existing", "testbeds", "because", "distributional", "evidence", "is", "of", "little", "utility", "in", "the", "classification", "of", "infcands", ".", "we", "also", "show", "that", ",", "due", "to", "its", "construction", ",", "many", "of", "sherliic", "\u2019", "s", "correct", "infcands", "are", "novel", "and", "missing", "from", "existing", "rule", "bases", ".", "we", "evaluate", "a", "large", "number", "of", "strong", "baselines", "on", "sherliic", ",", "ranging", "from", "semantic", "vector", "space", "models", "to", "state", "of", "the", "art", 
"neural", "models", "of", "natural", "language", "inference", "(", "nli", ")", ".", "we", "show", "that", "sherliic", "poses", "a", "tough", "challenge", "to", "existing", "nli", "systems", "."]}, {"venue": "ACL", "title": "What Question Answering can Learn from Trivia Nerds", "abstract": "In addition to the traditional task of machines answering questions, question answering (QA) research creates interesting, challenging questions that help systems how to answer questions and reveal the best systems. We argue that creating a QA dataset\u2014and the ubiquitous leaderboard that goes with it\u2014closely resembles running a trivia tournament: you write questions, have agents (either humans or machines) answer the questions, and declare a winner. However, the research community has ignored the hard-learned lessons from decades of the trivia community creating vibrant, fair, and effective question answering competitions. After detailing problems with existing QA datasets, we outline the key lessons\u2014removing ambiguity, discriminating skill, and adjudicating disputes\u2014that can transfer to QA research and how they might be implemented.", "doc_id": "288ba106f21d94b969d659740549d183", "publication_year": 2020, "sentences": ["in addition to the traditional task of machines answering questions , question answering ( qa ) research creates interesting , challenging questions that help systems how to answer questions and reveal the best systems .", "we argue that creating a qa dataset \u2014 and the ubiquitous leaderboard that goes with it \u2014 closely resembles running a trivia tournament : you write questions , have agents ( either humans or machines ) answer the questions , and declare a winner .", "however , the research community has ignored the hard - learned lessons from decades of the trivia community creating vibrant , fair , and effective question answering competitions .", "after detailing problems with existing qa datasets , we outline the key lessons 
\u2014 removing ambiguity , discriminating skill , and adjudicating disputes \u2014 that can transfer to qa research and how they might be implemented ."], "events": [{"event_type": "ITT", "arguments": [{"text": "question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["question", "answering"], "offsets": [11, 12]}], "trigger": {"text": "creates", "tokens": ["creates"], "offsets": [17]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [117]}, {"text": "removing ambiguity", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["removing", "ambiguity"], "offsets": [123, 124]}, {"text": "discriminating skill", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["discriminating", "skill"], "offsets": [126, 127]}, {"text": "adjudicating disputes", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["adjudicating", "disputes"], "offsets": [130, 131]}], "trigger": {"text": "outline", "tokens": ["outline"], "offsets": [118]}}], "document": ["in", "addition", "to", "the", "traditional", "task", "of", "machines", "answering", "questions", ",", "question", "answering", "(", "qa", ")", "research", "creates", "interesting", ",", "challenging", "questions", "that", "help", "systems", "how", "to", "answer", "questions", "and", "reveal", "the", "best", "systems", ".", "we", "argue", "that", "creating", "a", "qa", "dataset", "\u2014", "and", "the", "ubiquitous", "leaderboard", "that", "goes", "with", "it", "\u2014", "closely", "resembles", "running", "a", "trivia", "tournament", ":", "you", "write", "questions", ",", "have", "agents", "(", "either", "humans", "or", "machines", ")", "answer", "the", "questions", ",", "and", "declare", "a", "winner", ".", "however", ",", "the", "research", "community", "has", "ignored", "the", "hard", "-", "learned", "lessons", "from", "decades", "of", "the", "trivia", "community", "creating", "vibrant", ",", "fair", 
",", "and", "effective", "question", "answering", "competitions", ".", "after", "detailing", "problems", "with", "existing", "qa", "datasets", ",", "we", "outline", "the", "key", "lessons", "\u2014", "removing", "ambiguity", ",", "discriminating", "skill", ",", "and", "adjudicating", "disputes", "\u2014", "that", "can", "transfer", "to", "qa", "research", "and", "how", "they", "might", "be", "implemented", "."]}, {"venue": "ACL", "title": "Leveraging Local and Global Patterns for Self-Attention Networks", "abstract": "Self-attention networks have received increasing research attention. By default, the hidden states of each word are hierarchically calculated by attending to all words in the sentence, which assembles global information. However, several studies pointed out that taking all signals into account may lead to overlooking neighboring information (e.g. phrase pattern). To address this argument, we propose a hybrid attention mechanism to dynamically leverage both of the local and global information. Specifically, our approach uses a gating scalar for integrating both sources of the information, which is also convenient for quantifying their contributions. Experiments on various neural machine translation tasks demonstrate the effectiveness of the proposed method. The extensive analyses verify that the two types of contexts are complementary to each other, and our method gives highly effective improvements in their integration.", "doc_id": "fcf7f2065f4580c92fd9bc066bd4eeab", "publication_year": 2019, "sentences": ["self - attention networks have received increasing research attention .", "by default , the hidden states of each word are hierarchically calculated by attending to all words in the sentence , which assembles global information .", "however , several studies pointed out that taking all signals into account may lead to overlooking neighboring information ( e . g . 
phrase pattern ) .", "to address this argument , we propose a hybrid attention mechanism to dynamically leverage both of the local and global information .", "specifically , our approach uses a gating scalar for integrating both sources of the information , which is also convenient for quantifying their contributions .", "experiments on various neural machine translation tasks demonstrate the effectiveness of the proposed method .", "the extensive analyses verify that the two types of contexts are complementary to each other , and our method gives highly effective improvements in their integration ."], "events": [{"event_type": "ITT", "arguments": [{"text": "self - attention networks", "nugget_type": "APP", "argument_type": "Target", "tokens": ["self", "-", "attention", "networks"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "received", "tokens": ["received"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "hidden states of each word", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hidden", "states", "of", "each", "word"], "offsets": [14, 15, 16, 17, 18]}, {"text": "all words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["all", "words"], "offsets": [25, 26]}, {"text": "in the sentence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "sentence"], "offsets": [27, 28, 29]}], "trigger": {"text": "calculated", "tokens": ["calculated"], "offsets": [21]}}, {"event_type": "RWF", "arguments": [{"text": "overlooking", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["overlooking"], "offsets": [51]}], "trigger": {"text": "overlooking", "tokens": ["overlooking"], "offsets": [51]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "hybrid attention mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hybrid", "attention", "mechanism"], "offsets": [71, 72, 
73]}, {"text": "leverage", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leverage"], "offsets": [76]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "local information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["local", "information"], "offsets": [80, 83]}, {"text": "global information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["global", "information"], "offsets": [82, 83]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [76]}}, {"event_type": "MDS", "arguments": [{"text": "gating scalar", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["gating", "scalar"], "offsets": [91, 92]}, {"text": "both sources of the information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["both", "sources", "of", "the", "information"], "offsets": [95, 96, 97, 98, 99]}, {"text": "quantifying", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["quantifying"], "offsets": [106]}], "trigger": {"text": "integrating", "tokens": ["integrating"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "their contributions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["both", "sources", "of", "the", "information", "contributions"], "offsets": [95, 96, 97, 98, 99, 108]}], "trigger": {"text": "quantifying", "tokens": ["quantifying"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "on various neural machine translation tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "various", "neural", "machine", "translation", "tasks"], "offsets": [111, 112, 113, 114, 115, 116]}, {"text": "effectiveness of the proposed method", "nugget_type": "STR", "argument_type": "Object", "tokens": ["effectiveness", "of", "the", "hybrid", "attention", "mechanism"], "offsets": [119, 120, 121, 71, 72, 73]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], 
"offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "two types of contexts", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["two", "types", "of", "contexts"], "offsets": [131, 132, 133, 134]}, {"text": "to each other", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "each", "other"], "offsets": [137, 138, 139]}], "trigger": {"text": "complementary", "tokens": ["complementary"], "offsets": [136]}}, {"event_type": "FAC", "arguments": [{"text": "in their integration", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "their", "integration"], "offsets": [148, 149, 150]}, {"text": "hybrid attention mechanism", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hybrid", "attention", "mechanism"], "offsets": [71, 72, 73]}, {"text": "highly effective improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["highly", "effective", "improvements"], "offsets": [145, 146, 147]}], "trigger": {"text": "gives", "tokens": ["gives"], "offsets": [144]}}], "document": ["self", "-", "attention", "networks", "have", "received", "increasing", "research", "attention", ".", "by", "default", ",", "the", "hidden", "states", "of", "each", "word", "are", "hierarchically", "calculated", "by", "attending", "to", "all", "words", "in", "the", "sentence", ",", "which", "assembles", "global", "information", ".", "however", ",", "several", "studies", "pointed", "out", "that", "taking", "all", "signals", "into", "account", "may", "lead", "to", "overlooking", "neighboring", "information", "(", "e", ".", "g", ".", "phrase", "pattern", ")", ".", "to", "address", "this", "argument", ",", "we", "propose", "a", "hybrid", "attention", "mechanism", "to", "dynamically", "leverage", "both", "of", "the", "local", "and", "global", "information", ".", "specifically", ",", "our", "approach", "uses", "a", "gating", "scalar", "for", "integrating", "both", "sources", "of", "the", "information", ",", "which", "is", "also", 
"convenient", "for", "quantifying", "their", "contributions", ".", "experiments", "on", "various", "neural", "machine", "translation", "tasks", "demonstrate", "the", "effectiveness", "of", "the", "proposed", "method", ".", "the", "extensive", "analyses", "verify", "that", "the", "two", "types", "of", "contexts", "are", "complementary", "to", "each", "other", ",", "and", "our", "method", "gives", "highly", "effective", "improvements", "in", "their", "integration", "."]}, {"venue": "ACL", "title": "Multi-View Document Representation Learning for Open-Domain Dense Retrieval", "abstract": "Dense retrieval has achieved impressive advances in first-stage retrieval from a large-scale document collection, which is built on bi-encoder architecture to produce single vector representation of query and document. However, a document can usually answer multiple potential queries from different views. So the single vector representation of a document is hard to match with multi-view queries, and faces a semantic mismatch problem. This paper proposes a multi-view document representation learning framework, aiming to produce multi-view embeddings to represent documents and enforce them to align with different queries. First, we propose a simple yet effective method of generating multiple embeddings through viewers. Second, to prevent multi-view embeddings from collapsing to the same one, we further propose a global-local loss with annealed temperature to encourage the multiple viewers to better align with different potential queries. 
Experiments show our method outperforms recent works and achieves state-of-the-art results.", "doc_id": "f5a30ff80fcc44daf9fbc154cfda258b", "publication_year": 2022, "sentences": ["dense retrieval has achieved impressive advances in first - stage retrieval from a large - scale document collection , which is built on bi - encoder architecture to produce single vector representation of query and document .", "however , a document can usually answer multiple potential queries from different views .", "so the single vector representation of a document is hard to match with multi - view queries , and faces a semantic mismatch problem .", "this paper proposes a multi - view document representation learning framework , aiming to produce multi - view embeddings to represent documents and enforce them to align with different queries .", "first , we propose a simple yet effective method of generating multiple embeddings through viewers .", "second , to prevent multi - view embeddings from collapsing to the same one , we further propose a global - local loss with annealed temperature to encourage the multiple viewers to better align with different potential queries .", "experiments show our method outperforms recent works and achieves state - of - the - art results ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dense retrieval", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dense", "retrieval"], "offsets": [0, 1]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [3]}}, {"event_type": "RWS", "arguments": [{"text": "single vector representation of query", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["single", "vector", "representation", "of", "query"], "offsets": [29, 30, 31, 32, 33]}, {"text": "single vector representation of document", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["single", "vector", "representation", "of", "document"], "offsets": [29, 30, 31, 32, 35]}], "trigger": 
{"text": "produce", "tokens": ["produce"], "offsets": [28]}}, {"event_type": "RWF", "arguments": [{"text": "single vector representation of a document", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["single", "vector", "representation", "of", "a", "document"], "offsets": [53, 54, 55, 56, 57, 58]}, {"text": "hard to match", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hard", "to", "match"], "offsets": [60, 61, 62]}], "trigger": {"text": "hard to match", "tokens": ["hard", "to", "match"], "offsets": [60, 61, 62]}}, {"event_type": "RWF", "arguments": [{"text": "semantic mismatch problem", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["semantic", "mismatch", "problem"], "offsets": [72, 73, 74]}], "trigger": {"text": "faces", "tokens": ["faces"], "offsets": [70]}}, {"event_type": "PRP", "arguments": [{"text": "multi - view document representation learning framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "view", "document", "representation", "learning", "framework"], "offsets": [80, 81, 82, 83, 84, 85, 86]}], "trigger": {"text": "proposes", "tokens": ["proposes"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "multi - view embeddings", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multi", "-", "view", "embeddings"], "offsets": [91, 92, 93, 94]}, {"text": "represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represent"], "offsets": [96]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "documents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["documents"], "offsets": [97]}, {"text": "different queries", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["different", "queries"], "offsets": [104, 105]}], "trigger": {"text": "align", "tokens": ["align"], "offsets": [102]}}, {"event_type": "MDS", "arguments": [{"text": 
"multiple embeddings", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiple", "embeddings"], "offsets": [118, 119]}, {"text": "viewers", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["viewers"], "offsets": [121]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [117]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [138]}, {"text": "global - local loss with annealed temperature", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["global", "-", "local", "loss", "with", "annealed", "temperature"], "offsets": [142, 143, 144, 145, 146, 147, 148]}, {"text": "prevent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["prevent"], "offsets": [126]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [140]}}, {"event_type": "PUR", "arguments": [{"text": "multi - view embeddings", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["multi", "-", "view", "embeddings"], "offsets": [127, 128, 129, 130]}, {"text": "from collapsing to the same one", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "collapsing", "to", "the", "same", "one"], "offsets": [131, 132, 133, 134, 135, 136]}], "trigger": {"text": "prevent", "tokens": ["prevent"], "offsets": [126]}}, {"event_type": "MDS", "arguments": [{"text": "different potential queries", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["different", "potential", "queries"], "offsets": [158, 159, 160]}, {"text": "multiple viewers", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["multiple", "viewers"], "offsets": [152, 153]}], "trigger": {"text": "better align", "tokens": ["better", "align"], "offsets": [155, 156]}}, {"event_type": "CMP", "arguments": [{"text": "multi - view document representation learning framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multi", 
"-", "view", "document", "representation", "learning", "framework"], "offsets": [80, 81, 82, 83, 84, 85, 86]}, {"text": "recent works", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["recent", "works"], "offsets": [167, 168]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [166]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [166]}}, {"event_type": "FAC", "arguments": [{"text": "simple yet effective method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simple", "yet", "effective", "method"], "offsets": [112, 113, 114, 115]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [171, 172, 173, 174, 175, 176, 177, 178]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [170]}}, {"event_type": "PUR", "arguments": [{"text": "documents", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["documents"], "offsets": [97]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [96]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [109]}, {"text": "simple yet effective method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "yet", "effective", "method"], "offsets": [112, 113, 114, 115]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [110]}}], "document": ["dense", "retrieval", "has", "achieved", "impressive", "advances", "in", "first", "-", "stage", "retrieval", "from", "a", "large", "-", "scale", "document", "collection", ",", "which", "is", "built", "on", "bi", "-", "encoder", "architecture", "to", "produce", "single", "vector", "representation", "of", "query", "and", "document", ".", "however", ",", "a", "document", "can", "usually", "answer", "multiple", "potential", 
"queries", "from", "different", "views", ".", "so", "the", "single", "vector", "representation", "of", "a", "document", "is", "hard", "to", "match", "with", "multi", "-", "view", "queries", ",", "and", "faces", "a", "semantic", "mismatch", "problem", ".", "this", "paper", "proposes", "a", "multi", "-", "view", "document", "representation", "learning", "framework", ",", "aiming", "to", "produce", "multi", "-", "view", "embeddings", "to", "represent", "documents", "and", "enforce", "them", "to", "align", "with", "different", "queries", ".", "first", ",", "we", "propose", "a", "simple", "yet", "effective", "method", "of", "generating", "multiple", "embeddings", "through", "viewers", ".", "second", ",", "to", "prevent", "multi", "-", "view", "embeddings", "from", "collapsing", "to", "the", "same", "one", ",", "we", "further", "propose", "a", "global", "-", "local", "loss", "with", "annealed", "temperature", "to", "encourage", "the", "multiple", "viewers", "to", "better", "align", "with", "different", "potential", "queries", ".", "experiments", "show", "our", "method", "outperforms", "recent", "works", "and", "achieves", "state", "-", "of", "-", "the", "-", "art", "results", "."]}, {"venue": "ACL", "title": "PRIMERA: Pyramid-based Masked Sentence Pre-training for Multi-document Summarization", "abstract": "We introduce PRIMERA, a pre-trained model for multi-document representation with a focus on summarization that reduces the need for dataset-specific architectures and large amounts of fine-tuning labeled data. PRIMERA uses our newly proposed pre-training objective designed to teach the model to connect and aggregate information across documents. It also uses efficient encoder-decoder transformers to simplify the processing of concatenated input documents. 
With extensive experiments on 6 multi-document summarization datasets from 3 different domains on zero-shot, few-shot and full-supervised settings, PRIMERA outperforms current state-of-the-art dataset-specific and pre-trained models on most of these settings with large margins.", "doc_id": "0fa997948c2f3168adc6a8f9e734260d", "publication_year": 2022, "sentences": ["we introduce primera , a pre - trained model for multi - document representation with a focus on summarization that reduces the need for dataset - specific architectures and large amounts of fine - tuning labeled data .", "primera uses our newly proposed pre - training objective designed to teach the model to connect and aggregate information across documents .", "it also uses efficient encoder - decoder transformers to simplify the processing of concatenated input documents .", "with extensive experiments on 6 multi - document summarization datasets from 3 different domains on zero - shot , few - shot and full - supervised settings , primera outperforms current state - of - the - art dataset - specific and pre - trained models on most of these settings with large margins ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "primera", "nugget_type": "APP", "argument_type": "Content", "tokens": ["primera"], "offsets": [2]}, {"text": "multi - document representation", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["multi", "-", "document", "representation"], "offsets": [10, 11, 12, 13]}, {"text": "with a focus on summarization", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "a", "focus", "on", "summarization"], "offsets": [14, 15, 16, 17, 18]}, {"text": "reduces", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reduces"], "offsets": [20]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [1]}}, {"event_type": "PUR", 
"arguments": [{"text": "need for dataset - specific architectures", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["need", "for", "dataset", "-", "specific", "architectures"], "offsets": [22, 23, 24, 25, 26, 27]}, {"text": "need for large amounts of fine - tuning labeled data", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["need", "for", "large", "amounts", "of", "fine", "-", "tuning", "labeled", "data"], "offsets": [22, 23, 29, 30, 31, 32, 33, 34, 35, 36]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [20]}}, {"event_type": "PRP", "arguments": [{"text": "pre - training objective", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["pre", "-", "training", "objective"], "offsets": [43, 44, 45, 46]}, {"text": "our", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["our"], "offsets": [40]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "pre - training objective", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["pre", "-", "training", "objective"], "offsets": [43, 44, 45, 46]}, {"text": "connect and aggregate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["connect", "and", "aggregate"], "offsets": [53, 54, 55]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [39]}}, {"event_type": "PUR", "arguments": [{"text": "information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["information"], "offsets": [56]}, {"text": "across documents", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "documents"], "offsets": [57, 58]}], "trigger": {"text": "connect and aggregate", "tokens": ["connect", "and", "aggregate"], "offsets": [53, 54, 55]}}, {"event_type": "WKS", "arguments": [{"text": "efficient encoder - decoder transformers", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["efficient", "encoder", "-", "decoder", "transformers"], "offsets": [63, 64, 65, 
66, 67]}, {"text": "simplify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["simplify"], "offsets": [69]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [62]}}, {"event_type": "PUR", "arguments": [{"text": "processing of concatenated input documents", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["processing", "of", "concatenated", "input", "documents"], "offsets": [71, 72, 73, 74, 75]}], "trigger": {"text": "simplify", "tokens": ["simplify"], "offsets": [69]}}, {"event_type": "CMP", "arguments": [{"text": "6 multi - document summarization datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["6", "multi", "-", "document", "summarization", "datasets"], "offsets": [81, 82, 83, 84, 85, 86]}, {"text": "primera", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["primera"], "offsets": [105]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [106]}, {"text": "current state - of - the - art dataset - specific", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "dataset", "-", "specific"], "offsets": [107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117]}, {"text": "pre - trained models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["pre", "-", "trained", "models"], "offsets": [119, 120, 121, 122]}, {"text": "large margins", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["large", "margins"], "offsets": [129, 130]}, {"text": "zero - shot settings", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["zero", "-", "shot", "settings"], "offsets": [92, 93, 94, 103]}, {"text": "few - shot settings", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["few", "-", "shot", "settings"], "offsets": [96, 97, 98, 103]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [106]}}], "document": ["we", "introduce", "primera", ",", 
"a", "pre", "-", "trained", "model", "for", "multi", "-", "document", "representation", "with", "a", "focus", "on", "summarization", "that", "reduces", "the", "need", "for", "dataset", "-", "specific", "architectures", "and", "large", "amounts", "of", "fine", "-", "tuning", "labeled", "data", ".", "primera", "uses", "our", "newly", "proposed", "pre", "-", "training", "objective", "designed", "to", "teach", "the", "model", "to", "connect", "and", "aggregate", "information", "across", "documents", ".", "it", "also", "uses", "efficient", "encoder", "-", "decoder", "transformers", "to", "simplify", "the", "processing", "of", "concatenated", "input", "documents", ".", "with", "extensive", "experiments", "on", "6", "multi", "-", "document", "summarization", "datasets", "from", "3", "different", "domains", "on", "zero", "-", "shot", ",", "few", "-", "shot", "and", "full", "-", "supervised", "settings", ",", "primera", "outperforms", "current", "state", "-", "of", "-", "the", "-", "art", "dataset", "-", "specific", "and", "pre", "-", "trained", "models", "on", "most", "of", "these", "settings", "with", "large", "margins", "."]}, {"venue": "ACL", "title": "Unsupervised Learning of PCFGs with Normalizing Flow", "abstract": "Unsupervised PCFG inducers hypothesize sets of compact context-free rules as explanations for sentences. PCFG induction not only provides tools for low-resource languages, but also plays an important role in modeling language acquisition (Bannard et al., 2009; Abend et al. 2017). However, current PCFG induction models, using word tokens as input, are unable to incorporate semantics and morphology into induction, and may encounter issues of sparse vocabulary when facing morphologically rich languages. This paper describes a neural PCFG inducer which employs context embeddings (Peters et al., 2018) in a normalizing flow model (Dinh et al., 2015) to extend PCFG induction to use semantic and morphological information. 
Linguistically motivated sparsity and categorical distance constraints are imposed on the inducer as regularization. Experiments show that the PCFG induction model with normalizing flow produces grammars with state-of-the-art accuracy on a variety of different languages. Ablation further shows a positive effect of normalizing flow, context embeddings and proposed regularizers.", "doc_id": "9944316024d55878f3589ec4cdfb8648", "publication_year": 2019, "sentences": ["unsupervised pcfg inducers hypothesize sets of compact context - free rules as explanations for sentences .", "pcfg induction not only provides tools for low - resource languages , but also plays an important role in modeling language acquisition ( bannard et al . , 2009 ; abend et al . 2017 ) .", "however , current pcfg induction models , using word tokens as input , are unable to incorporate semantics and morphology into induction , and may encounter issues of sparse vocabulary when facing morphologically rich languages .", "this paper describes a neural pcfg inducer which employs context embeddings ( peters et al . , 2018 ) in a normalizing flow model ( dinh et al . 
, 2015 ) to extend pcfg induction to use semantic and morphological information .", "linguistically motivated sparsity and categorical distance constraints are imposed on the inducer as regularization .", "experiments show that the pcfg induction model with normalizing flow produces grammars with state - of - the - art accuracy on a variety of different languages .", "ablation further shows a positive effect of normalizing flow , context embeddings and proposed regularizers ."], "events": [{"event_type": "ITT", "arguments": [{"text": "unsupervised pcfg inducers", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["unsupervised", "pcfg", "inducers"], "offsets": [0, 1, 2]}], "trigger": {"text": "hypothesize", "tokens": ["hypothesize"], "offsets": [3]}}, {"event_type": "RWS", "arguments": [{"text": "current pcfg induction models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["current", "pcfg", "induction", "models"], "offsets": [55, 56, 57, 58]}, {"text": "word tokens", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["word", "tokens"], "offsets": [61, 62]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [60]}}, {"event_type": "RWF", "arguments": [{"text": "current pcfg induction models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["current", "pcfg", "induction", "models"], "offsets": [55, 56, 57, 58]}, {"text": "semantics", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["semantics"], "offsets": [70]}, {"text": "morphology", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["morphology"], "offsets": [72]}, {"text": "induction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["induction"], "offsets": [74]}], "trigger": {"text": "unable to incorporate", "tokens": ["unable", "to", "incorporate"], "offsets": [67, 68, 69]}}, {"event_type": "RWF", "arguments": [{"text": "current pcfg induction models", "nugget_type": "APP", "argument_type": "Concern", "tokens": 
["current", "pcfg", "induction", "models"], "offsets": [55, 56, 57, 58]}, {"text": "issues of sparse vocabulary", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["issues", "of", "sparse", "vocabulary"], "offsets": [79, 80, 81, 82]}, {"text": "when facing morphologically rich languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "facing", "morphologically", "rich", "languages"], "offsets": [83, 84, 85, 86, 87]}], "trigger": {"text": "may encounter", "tokens": ["may", "encounter"], "offsets": [77, 78]}}, {"event_type": "PRP", "arguments": [{"text": "neural pcfg inducer", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["neural", "pcfg", "inducer"], "offsets": [93, 94, 95]}], "trigger": {"text": "describes", "tokens": ["describes"], "offsets": [91]}}, {"event_type": "MDS", "arguments": [{"text": "in a normalizing flow model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "normalizing", "flow", "model"], "offsets": [108, 109, 110, 111, 112]}, {"text": "pcfg induction", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["pcfg", "induction"], "offsets": [123, 124]}, {"text": "use", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["use"], "offsets": [126]}, {"text": "context embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["context", "embeddings"], "offsets": [98, 99]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [97]}}, {"event_type": "PUR", "arguments": [{"text": "semantic information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["semantic", "information"], "offsets": [127, 130]}, {"text": "morphological information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["morphological", "information"], "offsets": [129, 130]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [126]}}, {"event_type": "WKS", "arguments": [{"text": "linguistically motivated sparsity", "nugget_type": "TAK", 
"argument_type": "Content", "tokens": ["linguistically", "motivated", "sparsity"], "offsets": [132, 133, 134]}, {"text": "categorical distance constraints", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["categorical", "distance", "constraints"], "offsets": [136, 137, 138]}, {"text": "regularization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["regularization"], "offsets": [145]}, {"text": "on the inducer", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "inducer"], "offsets": [141, 142, 143]}], "trigger": {"text": "imposed", "tokens": ["imposed"], "offsets": [140]}}, {"event_type": "FAC", "arguments": [{"text": "on a variety of different languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "variety", "of", "different", "languages"], "offsets": [168, 169, 170, 171, 172, 173]}, {"text": "pcfg induction model with normalizing flow", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pcfg", "induction", "model", "with", "normalizing", "flow"], "offsets": [151, 152, 153, 154, 155, 156]}, {"text": "grammars with state - of - the - art accuracy", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["grammars", "with", "state", "-", "of", "-", "the", "-", "art", "accuracy"], "offsets": [158, 159, 160, 161, 162, 163, 164, 165, 166, 167]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [157]}}, {"event_type": "FAC", "arguments": [{"text": "positive effect", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["positive", "effect"], "offsets": [179, 180]}, {"text": "normalizing flow", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["normalizing", "flow"], "offsets": [182, 183]}, {"text": "context embeddings", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["context", "embeddings"], "offsets": [185, 186]}, {"text": "proposed regularizers", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["proposed", 
"regularizers"], "offsets": [188, 189]}], "trigger": {"text": "further shows", "tokens": ["further", "shows"], "offsets": [176, 177]}}], "document": ["unsupervised", "pcfg", "inducers", "hypothesize", "sets", "of", "compact", "context", "-", "free", "rules", "as", "explanations", "for", "sentences", ".", "pcfg", "induction", "not", "only", "provides", "tools", "for", "low", "-", "resource", "languages", ",", "but", "also", "plays", "an", "important", "role", "in", "modeling", "language", "acquisition", "(", "bannard", "et", "al", ".", ",", "2009", ";", "abend", "et", "al", ".", "2017", ")", ".", "however", ",", "current", "pcfg", "induction", "models", ",", "using", "word", "tokens", "as", "input", ",", "are", "unable", "to", "incorporate", "semantics", "and", "morphology", "into", "induction", ",", "and", "may", "encounter", "issues", "of", "sparse", "vocabulary", "when", "facing", "morphologically", "rich", "languages", ".", "this", "paper", "describes", "a", "neural", "pcfg", "inducer", "which", "employs", "context", "embeddings", "(", "peters", "et", "al", ".", ",", "2018", ")", "in", "a", "normalizing", "flow", "model", "(", "dinh", "et", "al", ".", ",", "2015", ")", "to", "extend", "pcfg", "induction", "to", "use", "semantic", "and", "morphological", "information", ".", "linguistically", "motivated", "sparsity", "and", "categorical", "distance", "constraints", "are", "imposed", "on", "the", "inducer", "as", "regularization", ".", "experiments", "show", "that", "the", "pcfg", "induction", "model", "with", "normalizing", "flow", "produces", "grammars", "with", "state", "-", "of", "-", "the", "-", "art", "accuracy", "on", "a", "variety", "of", "different", "languages", ".", "ablation", "further", "shows", "a", "positive", "effect", "of", "normalizing", "flow", ",", "context", "embeddings", "and", "proposed", "regularizers", "."]}, {"venue": "ACL", "title": "Can Synthetic Translations Improve Bitext Quality?", "abstract": "Synthetic translations have been used 
for a wide range of NLP tasks primarily as a means of data augmentation. This work explores, instead, how synthetic translations can be used to revise potentially imperfect reference translations in mined bitext. We find that synthetic samples can improve bitext quality without any additional bilingual supervision when they replace the originals based on a semantic equivalence classifier that helps mitigate NMT noise. The improved quality of the revised bitext is confirmed intrinsically via human evaluation and extrinsically through bilingual induction and MT tasks.", "doc_id": "6ed8835f222fc6aefc34289bb8bc7341", "publication_year": 2022, "sentences": ["synthetic translations have been used for a wide range of nlp tasks primarily as a means of data augmentation .", "this work explores , instead , how synthetic translations can be used to revise potentially imperfect reference translations in mined bitext .", "we find that synthetic samples can improve bitext quality without any additional bilingual supervision when they replace the originals based on a semantic equivalence classifier that helps mitigate nmt noise .", "the improved quality of the revised bitext is confirmed intrinsically via human evaluation and extrinsically through bilingual induction and mt tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "data augmentation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["data", "augmentation"], "offsets": [17, 18]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "revise", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["revise"], "offsets": [33]}, {"text": "synthetic translations", "nugget_type": "APP", "argument_type": "Content", "tokens": ["synthetic", "translations"], "offsets": [27, 28]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [31]}}, {"event_type": "PUR", "arguments": [{"text": "potentially imperfect reference translations", 
"nugget_type": "WEA", "argument_type": "Aim", "tokens": ["potentially", "imperfect", "reference", "translations"], "offsets": [34, 35, 36, 37]}, {"text": "in mined bitext", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "mined", "bitext"], "offsets": [38, 39, 40]}], "trigger": {"text": "revise", "tokens": ["revise"], "offsets": [33]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [42]}, {"text": "improve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improve"], "offsets": [48]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [43]}}, {"event_type": "FAC", "arguments": [{"text": "synthetic samples", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["synthetic", "samples"], "offsets": [45, 46]}, {"text": "bitext quality", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["bitext", "quality"], "offsets": [49, 50]}, {"text": "without any additional bilingual supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "any", "additional", "bilingual", "supervision"], "offsets": [51, 52, 53, 54, 55]}, {"text": "helps mitigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["helps", "mitigate"], "offsets": [68, 69]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [48]}}, {"event_type": "PUR", "arguments": [{"text": "nmt noise", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["nmt", "noise"], "offsets": [70, 71]}], "trigger": {"text": "helps mitigate", "tokens": ["helps", "mitigate"], "offsets": [68, 69]}}, {"event_type": "FAC", "arguments": [{"text": "improved quality of the revised bitext", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["improved", "quality", "of", "the", "revised", "bitext"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "via human evaluation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", 
"human", "evaluation"], "offsets": [83, 84, 85]}, {"text": "through bilingual induction and mt tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "bilingual", "induction", "and", "mt", "tasks"], "offsets": [88, 89, 90, 91, 92, 93]}], "trigger": {"text": "confirmed", "tokens": ["confirmed"], "offsets": [81]}}], "document": ["synthetic", "translations", "have", "been", "used", "for", "a", "wide", "range", "of", "nlp", "tasks", "primarily", "as", "a", "means", "of", "data", "augmentation", ".", "this", "work", "explores", ",", "instead", ",", "how", "synthetic", "translations", "can", "be", "used", "to", "revise", "potentially", "imperfect", "reference", "translations", "in", "mined", "bitext", ".", "we", "find", "that", "synthetic", "samples", "can", "improve", "bitext", "quality", "without", "any", "additional", "bilingual", "supervision", "when", "they", "replace", "the", "originals", "based", "on", "a", "semantic", "equivalence", "classifier", "that", "helps", "mitigate", "nmt", "noise", ".", "the", "improved", "quality", "of", "the", "revised", "bitext", "is", "confirmed", "intrinsically", "via", "human", "evaluation", "and", "extrinsically", "through", "bilingual", "induction", "and", "mt", "tasks", "."]}, {"venue": "ACL", "title": "Expertise Style Transfer: A New Task Towards Better Communication between Experts and Laymen", "abstract": "The curse of knowledge can impede communication between experts and laymen. We propose a new task of expertise style transfer and contribute a manually annotated dataset with the goal of alleviating such cognitive biases. Solving this task not only simplifies the professional language, but also improves the accuracy and expertise level of laymen descriptions using simple words. This is a challenging task, unaddressed in previous work, as it requires the models to have expert intelligence in order to modify text with a deep understanding of domain knowledge and structures. 
We establish the benchmark performance of five state-of-the-art models for style transfer and text simplification. The results demonstrate a significant gap between machine and human performance. We also discuss the challenges of automatic evaluation, to provide insights into future research directions. The dataset is publicly available at https://srhthu.github.io/expertise-style-transfer/.", "doc_id": "da1fef615d0e6cde800a7d15b8881226", "publication_year": 2020, "sentences": ["the curse of knowledge can impede communication between experts and laymen .", "we propose a new task of expertise style transfer and contribute a manually annotated dataset with the goal of alleviating such cognitive biases .", "solving this task not only simplifies the professional language , but also improves the accuracy and expertise level of laymen descriptions using simple words .", "this is a challenging task , unaddressed in previous work , as it requires the models to have expert intelligence in order to modify text with a deep understanding of domain knowledge and structures .", "we establish the benchmark performance of five state - of - the - art models for style transfer and text simplification .", "the results demonstrate a significant gap between machine and human performance .", "we also discuss the challenges of automatic evaluation , to provide insights into future research directions .", "the dataset is publicly available at https : / / srhthu . github . 
io / expertise - style - transfer / ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [12]}, {"text": "task of expertise style transfer", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["task", "of", "expertise", "style", "transfer"], "offsets": [16, 17, 18, 19, 20]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [13]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [12]}, {"text": "manually annotated dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["manually", "annotated", "dataset"], "offsets": [24, 25, 26]}, {"text": "alleviating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviating"], "offsets": [31]}], "trigger": {"text": "contribute", "tokens": ["contribute"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "cognitive biases", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["cognitive", "biases"], "offsets": [33, 34]}], "trigger": {"text": "alleviating", "tokens": ["alleviating"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "style transfer and text simplification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["style", "transfer", "and", "text", "simplification"], "offsets": [112, 113, 114, 115, 116]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [96]}, {"text": "benchmark performance of five state - of - the - art models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["benchmark", "performance", "of", "five", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [99, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110]}], "trigger": {"text": "establish", "tokens": ["establish"], "offsets": [97]}}, {"event_type": "FAC", "arguments": [{"text": "significant gap between 
machine performance", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["significant", "gap", "between", "machine", "performance"], "offsets": [122, 123, 124, 125, 128]}, {"text": "human performance", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["human", "performance"], "offsets": [127, 128]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [120]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [130]}, {"text": "challenges of automatic evaluation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["challenges", "of", "automatic", "evaluation"], "offsets": [134, 135, 136, 137]}, {"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [140]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [132]}}, {"event_type": "PUR", "arguments": [{"text": "insights into future research directions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["insights", "into", "future", "research", "directions"], "offsets": [141, 142, 143, 144, 145]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [140]}}], "document": ["the", "curse", "of", "knowledge", "can", "impede", "communication", "between", "experts", "and", "laymen", ".", "we", "propose", "a", "new", "task", "of", "expertise", "style", "transfer", "and", "contribute", "a", "manually", "annotated", "dataset", "with", "the", "goal", "of", "alleviating", "such", "cognitive", "biases", ".", "solving", "this", "task", "not", "only", "simplifies", "the", "professional", "language", ",", "but", "also", "improves", "the", "accuracy", "and", "expertise", "level", "of", "laymen", "descriptions", "using", "simple", "words", ".", "this", "is", "a", "challenging", "task", ",", "unaddressed", "in", "previous", "work", ",", "as", "it", "requires", "the", "models", "to", "have", "expert", "intelligence", 
"in", "order", "to", "modify", "text", "with", "a", "deep", "understanding", "of", "domain", "knowledge", "and", "structures", ".", "we", "establish", "the", "benchmark", "performance", "of", "five", "state", "-", "of", "-", "the", "-", "art", "models", "for", "style", "transfer", "and", "text", "simplification", ".", "the", "results", "demonstrate", "a", "significant", "gap", "between", "machine", "and", "human", "performance", ".", "we", "also", "discuss", "the", "challenges", "of", "automatic", "evaluation", ",", "to", "provide", "insights", "into", "future", "research", "directions", ".", "the", "dataset", "is", "publicly", "available", "at", "https", ":", "/", "/", "srhthu", ".", "github", ".", "io", "/", "expertise", "-", "style", "-", "transfer", "/", "."]}, {"venue": "ACL", "title": "Towards Fair Evaluation of Dialogue State Tracking by Flexible Incorporation of Turn-level Performances", "abstract": "Dialogue State Tracking (DST) is primarily evaluated using Joint Goal Accuracy (JGA) defined as the fraction of turns where the ground-truth dialogue state exactly matches the prediction. Generally in DST, the dialogue state or belief state for a given turn contain all the intents shown by the user till that turn. Due to this cumulative nature of the belief state, it is difficult to get a correct prediction once a misprediction has occurred. Thus, although being a useful metric, it can be harsh at times and underestimate the true potential of a DST model. Moreover, an improvement in JGA can sometimes decrease the performance of turn-level or non-cumulative belief state prediction due to inconsistency in annotations. So, using JGA as the only metric for model selection may not be ideal for all scenarios. In this work, we discuss various evaluation metrics used for DST along with their shortcomings. To address the existing issues, we propose a new evaluation metric named Flexible Goal Accuracy (FGA). FGA is a generalized version of JGA. 
But unlike JGA, it tries to give penalized rewards to mispredictions that are locally correct i.e. the root cause of the error is an earlier turn. By doing so, FGA considers the performance of both cumulative and turn-level prediction flexibly and provides a better insight than the existing metrics. We also show that FGA is a better discriminator of DST model performance.", "doc_id": "7ba160a9eb0c24d40f755e06887a6ecd", "publication_year": 2022, "sentences": ["dialogue state tracking ( dst ) is primarily evaluated using joint goal accuracy ( jga ) defined as the fraction of turns where the ground - truth dialogue state exactly matches the prediction .", "generally in dst , the dialogue state or belief state for a given turn contain all the intents shown by the user till that turn .", "due to this cumulative nature of the belief state , it is difficult to get a correct prediction once a misprediction has occurred .", "thus , although being a useful metric , it can be harsh at times and underestimate the true potential of a dst model .", "moreover , an improvement in jga can sometimes decrease the performance of turn - level or non - cumulative belief state prediction due to inconsistency in annotations .", "so , using jga as the only metric for model selection may not be ideal for all scenarios .", "in this work , we discuss various evaluation metrics used for dst along with their shortcomings .", "to address the existing issues , we propose a new evaluation metric named flexible goal accuracy ( fga ) .", "fga is a generalized version of jga .", "but unlike jga , it tries to give penalized rewards to mispredictions that are locally correct i . e . 
the root cause of the error is an earlier turn .", "by doing so , fga considers the performance of both cumulative and turn - level prediction flexibly and provides a better insight than the existing metrics .", "we also show that fga is a better discriminator of dst model performance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dialogue state tracking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialogue", "state", "tracking"], "offsets": [0, 1, 2]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [8]}}, {"event_type": "RWS", "arguments": [{"text": "dialogue state or belief state", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["dialogue", "state", "or", "belief", "state"], "offsets": [39, 40, 41, 42, 43]}, {"text": "all the intents", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["all", "the", "intents"], "offsets": [49, 50, 51]}], "trigger": {"text": "contain", "tokens": ["contain"], "offsets": [48]}}, {"event_type": "RWF", "arguments": [{"text": "harsh", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["harsh"], "offsets": [95]}, {"text": "at times", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["at", "times"], "offsets": [96, 97]}], "trigger": {"text": "harsh", "tokens": ["harsh"], "offsets": [95]}}, {"event_type": "RWF", "arguments": [{"text": "underestimate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["underestimate"], "offsets": [99]}, {"text": "belief state", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["belief", "state"], "offsets": [67, 68]}], "trigger": {"text": "underestimate", "tokens": ["underestimate"], "offsets": [99]}}, {"event_type": "RWF", "arguments": [{"text": "performance of turn - level or non - cumulative belief state prediction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance", "of", "turn", "-", "level", "or", "non", "-", "cumulative", "belief", "state", 
"prediction"], "offsets": [118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129]}, {"text": "inconsistency in annotations", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inconsistency", "in", "annotations"], "offsets": [132, 133, 134]}, {"text": "sometimes", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["sometimes"], "offsets": [115]}], "trigger": {"text": "decrease", "tokens": ["decrease"], "offsets": [116]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [159]}, {"text": "evaluation metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["evaluation", "metrics"], "offsets": [162, 163]}, {"text": "used for dst along with their shortcomings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["used", "for", "dialogue", "state", "tracking", "along", "with", "their", "shortcomings"], "offsets": [164, 165, 0, 1, 2, 167, 168, 169, 170]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [160]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [178]}, {"text": "flexible goal accuracy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["flexible", "goal", "accuracy"], "offsets": [185, 186, 187]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [179]}}, {"event_type": "MDS", "arguments": [{"text": "mispredictions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["mispredictions"], "offsets": [211]}, {"text": "penalized rewards", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["penalized", "rewards"], "offsets": [208, 209]}], "trigger": {"text": "give", "tokens": ["give"], "offsets": [207]}}, {"event_type": "FAC", "arguments": [{"text": "flexible goal accuracy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["flexible", "goal", "accuracy"], "offsets": 
[185, 186, 187]}], "trigger": {"text": "better discriminator of dst model performance", "tokens": ["better", "discriminator", "of", "dst", "model", "performance"], "offsets": [265, 266, 267, 268, 269, 270]}}, {"event_type": "RWF", "arguments": [{"text": "difficult", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["difficult"], "offsets": [72]}, {"text": "get", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["get"], "offsets": [74]}, {"text": "once a misprediction has occurred", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["once", "a", "misprediction", "has", "occurred"], "offsets": [78, 79, 80, 81, 82]}], "trigger": {"text": "difficult", "tokens": ["difficult"], "offsets": [72]}}, {"event_type": "PUR", "arguments": [{"text": "correct prediction", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["correct", "prediction"], "offsets": [76, 77]}], "trigger": {"text": "get", "tokens": ["get"], "offsets": [74]}}, {"event_type": "FAC", "arguments": [{"text": "flexible goal accuracy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["flexible", "goal", "accuracy"], "offsets": [185, 186, 187]}, {"text": "performance of both cumulative and turn - level prediction flexibly", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "both", "cumulative", "and", "turn", "-", "level", "prediction", "flexibly"], "offsets": [238, 239, 240, 241, 242, 243, 244, 245, 246, 247]}], "trigger": {"text": "considers", "tokens": ["considers"], "offsets": [236]}}, {"event_type": "CMP", "arguments": [{"text": "flexible goal accuracy", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["flexible", "goal", "accuracy"], "offsets": [185, 186, 187]}, {"text": "existing metrics", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "metrics"], "offsets": [255, 256]}, {"text": "better insight", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "insight"], "offsets": [251, 
252]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [249]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [258]}, {"text": "better discriminator of dst model performance", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["better", "discriminator", "of", "dialogue", "state", "tracking", "model", "performance"], "offsets": [265, 266, 267, 0, 1, 2, 269, 270]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [260]}}], "document": ["dialogue", "state", "tracking", "(", "dst", ")", "is", "primarily", "evaluated", "using", "joint", "goal", "accuracy", "(", "jga", ")", "defined", "as", "the", "fraction", "of", "turns", "where", "the", "ground", "-", "truth", "dialogue", "state", "exactly", "matches", "the", "prediction", ".", "generally", "in", "dst", ",", "the", "dialogue", "state", "or", "belief", "state", "for", "a", "given", "turn", "contain", "all", "the", "intents", "shown", "by", "the", "user", "till", "that", "turn", ".", "due", "to", "this", "cumulative", "nature", "of", "the", "belief", "state", ",", "it", "is", "difficult", "to", "get", "a", "correct", "prediction", "once", "a", "misprediction", "has", "occurred", ".", "thus", ",", "although", "being", "a", "useful", "metric", ",", "it", "can", "be", "harsh", "at", "times", "and", "underestimate", "the", "true", "potential", "of", "a", "dst", "model", ".", "moreover", ",", "an", "improvement", "in", "jga", "can", "sometimes", "decrease", "the", "performance", "of", "turn", "-", "level", "or", "non", "-", "cumulative", "belief", "state", "prediction", "due", "to", "inconsistency", "in", "annotations", ".", "so", ",", "using", "jga", "as", "the", "only", "metric", "for", "model", "selection", "may", "not", "be", "ideal", "for", "all", "scenarios", ".", "in", "this", "work", ",", "we", "discuss", "various", "evaluation", "metrics", "used", "for", "dst", "along", "with", "their", 
"shortcomings", ".", "to", "address", "the", "existing", "issues", ",", "we", "propose", "a", "new", "evaluation", "metric", "named", "flexible", "goal", "accuracy", "(", "fga", ")", ".", "fga", "is", "a", "generalized", "version", "of", "jga", ".", "but", "unlike", "jga", ",", "it", "tries", "to", "give", "penalized", "rewards", "to", "mispredictions", "that", "are", "locally", "correct", "i", ".", "e", ".", "the", "root", "cause", "of", "the", "error", "is", "an", "earlier", "turn", ".", "by", "doing", "so", ",", "fga", "considers", "the", "performance", "of", "both", "cumulative", "and", "turn", "-", "level", "prediction", "flexibly", "and", "provides", "a", "better", "insight", "than", "the", "existing", "metrics", ".", "we", "also", "show", "that", "fga", "is", "a", "better", "discriminator", "of", "dst", "model", "performance", "."]}, {"venue": "ACL", "title": "The Case for Translation-Invariant Self-Attention in Transformer-Based Language Models", "abstract": "Mechanisms for encoding positional information are central for transformer-based language models. In this paper, we analyze the position embeddings of existing language models, finding strong evidence of translation invariance, both for the embeddings themselves and for their effect on self-attention. The degree of translation invariance increases during training and correlates positively with model performance. Our findings lead us to propose translation-invariant self-attention (TISA), which accounts for the relative position between tokens in an interpretable fashion without needing conventional position embeddings. Our proposal has several theoretical advantages over existing position-representation approaches. 
Proof-of-concept experiments show that it improves on regular ALBERT on GLUE tasks, while only adding orders of magnitude less positional parameters.", "doc_id": "14c9f95dc80ff4ad29844dc2cfa15a89", "publication_year": 2021, "sentences": ["mechanisms for encoding positional information are central for transformer - based language models .", "in this paper , we analyze the position embeddings of existing language models , finding strong evidence of translation invariance , both for the embeddings themselves and for their effect on self - attention .", "the degree of translation invariance increases during training and correlates positively with model performance .", "our findings lead us to propose translation - invariant self - attention ( tisa ) , which accounts for the relative position between tokens in an interpretable fashion without needing conventional position embeddings .", "our proposal has several theoretical advantages over existing position - representation approaches .", "proof - of - concept experiments show that it improves on regular albert on glue tasks , while only adding orders of magnitude less positional parameters ."], "events": [{"event_type": "ITT", "arguments": [{"text": "mechanisms for encoding positional information", "nugget_type": "APP", "argument_type": "Target", "tokens": ["mechanisms", "for", "encoding", "positional", "information"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "central", "tokens": ["central"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [18]}, {"text": "position embeddings", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["position", "embeddings"], "offsets": [21, 22]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [19]}}, {"event_type": "FAC", "arguments": [{"text": "during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training"], 
"offsets": [55, 56]}, {"text": "degree of translation invariance", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["degree", "of", "translation", "invariance"], "offsets": [50, 51, 52, 53]}], "trigger": {"text": "increases", "tokens": ["increases"], "offsets": [54]}}, {"event_type": "FAC", "arguments": [{"text": "degree of translation invariance", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["degree", "of", "translation", "invariance"], "offsets": [50, 51, 52, 53]}, {"text": "model performance", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["model", "performance"], "offsets": [61, 62]}], "trigger": {"text": "correlates positively", "tokens": ["correlates", "positively"], "offsets": [58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "us", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["us"], "offsets": [67]}, {"text": "translation - invariant self - attention", "nugget_type": "APP", "argument_type": "Content", "tokens": ["translation", "-", "invariant", "self", "-", "attention"], "offsets": [70, 71, 72, 73, 74, 75]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "relative position between tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["relative", "position", "between", "tokens"], "offsets": [84, 85, 86, 87]}, {"text": "in an interpretable fashion", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "interpretable", "fashion"], "offsets": [88, 89, 90, 91]}, {"text": "without needing conventional position embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "needing", "conventional", "position", "embeddings"], "offsets": [92, 93, 94, 95, 96]}], "trigger": {"text": "accounts", "tokens": ["accounts"], "offsets": [81]}}, {"event_type": "CMP", "arguments": [{"text": "translation - invariant self - attention", "nugget_type": "APP", "argument_type": 
"Arg1", "tokens": ["translation", "-", "invariant", "self", "-", "attention"], "offsets": [70, 71, 72, 73, 74, 75]}, {"text": "theoretical advantages", "nugget_type": "STR", "argument_type": "Result", "tokens": ["theoretical", "advantages"], "offsets": [102, 103]}, {"text": "existing position - representation approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "position", "-", "representation", "approaches"], "offsets": [105, 106, 107, 108, 109]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "translation - invariant self - attention", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["translation", "-", "invariant", "self", "-", "attention"], "offsets": [70, 71, 72, 73, 74, 75]}, {"text": "regular albert", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["regular", "albert"], "offsets": [122, 123]}, {"text": "glue tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["glue", "tasks"], "offsets": [125, 126]}, {"text": "adding orders of magnitude less positional parameters", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["adding", "orders", "of", "magnitude", "less", "positional", "parameters"], "offsets": [130, 131, 132, 133, 134, 135, 136]}], "trigger": {"text": "improves on", "tokens": ["improves", "on"], "offsets": [120, 121]}}, {"event_type": "FAC", "arguments": [{"text": "strong evidence", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["strong", "evidence"], "offsets": [29, 30]}, {"text": "for the embeddings themselves", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "the", "embeddings", "themselves"], "offsets": [36, 37, 38, 39]}, {"text": "for their effect on self - attention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "their", "effect", "on", "self", "-", "attention"], "offsets": [41, 42, 43, 44, 45, 46, 47]}, {"text": "translation invariance", 
"nugget_type": "FEA", "argument_type": "Subject", "tokens": ["translation", "invariance"], "offsets": [32, 33]}], "trigger": {"text": "finding", "tokens": ["finding"], "offsets": [28]}}], "document": ["mechanisms", "for", "encoding", "positional", "information", "are", "central", "for", "transformer", "-", "based", "language", "models", ".", "in", "this", "paper", ",", "we", "analyze", "the", "position", "embeddings", "of", "existing", "language", "models", ",", "finding", "strong", "evidence", "of", "translation", "invariance", ",", "both", "for", "the", "embeddings", "themselves", "and", "for", "their", "effect", "on", "self", "-", "attention", ".", "the", "degree", "of", "translation", "invariance", "increases", "during", "training", "and", "correlates", "positively", "with", "model", "performance", ".", "our", "findings", "lead", "us", "to", "propose", "translation", "-", "invariant", "self", "-", "attention", "(", "tisa", ")", ",", "which", "accounts", "for", "the", "relative", "position", "between", "tokens", "in", "an", "interpretable", "fashion", "without", "needing", "conventional", "position", "embeddings", ".", "our", "proposal", "has", "several", "theoretical", "advantages", "over", "existing", "position", "-", "representation", "approaches", ".", "proof", "-", "of", "-", "concept", "experiments", "show", "that", "it", "improves", "on", "regular", "albert", "on", "glue", "tasks", ",", "while", "only", "adding", "orders", "of", "magnitude", "less", "positional", "parameters", "."]}, {"venue": "ACL", "title": "Exploring Contextual Word-level Style Relevance for Unsupervised Style Transfer", "abstract": "Unsupervised style transfer aims to change the style of an input sentence while preserving its original content without using parallel training data. In current dominant approaches, owing to the lack of fine-grained control on the influence from the target style, they are unable to yield desirable output sentences. 
In this paper, we propose a novel attentional sequence-to-sequence (Seq2seq) model that dynamically exploits the relevance of each output word to the target style for unsupervised style transfer. Specifically, we first pretrain a style classifier, where the relevance of each input word to the original style can be quantified via layer-wise relevance propagation. In a denoising auto-encoding manner, we train an attentional Seq2seq model to reconstruct input sentences and repredict word-level previously-quantified style relevance simultaneously. In this way, this model is endowed with the ability to automatically predict the style relevance of each output word. Then, we equip the decoder of this model with a neural style component to exploit the predicted wordlevel style relevance for better style transfer. Particularly, we fine-tune this model using a carefully-designed objective function involving style transfer, style relevance consistency, content preservation and fluency modeling loss terms. 
Experimental results show that our proposed model achieves state-of-the-art performance in terms of both transfer accuracy and content preservation.", "doc_id": "309845a24b34fa49ac837c5da545127a", "publication_year": 2020, "sentences": ["unsupervised style transfer aims to change the style of an input sentence while preserving its original content without using parallel training data .", "in current dominant approaches , owing to the lack of fine - grained control on the influence from the target style , they are unable to yield desirable output sentences .", "in this paper , we propose a novel attentional sequence - to - sequence ( seq2seq ) model that dynamically exploits the relevance of each output word to the target style for unsupervised style transfer .", "specifically , we first pretrain a style classifier , where the relevance of each input word to the original style can be quantified via layer - wise relevance propagation .", "in a denoising auto - encoding manner , we train an attentional seq2seq model to reconstruct input sentences and repredict word - level previously - quantified style relevance simultaneously .", "in this way , this model is endowed with the ability to automatically predict the style relevance of each output word .", "then , we equip the decoder of this model with a neural style component to exploit the predicted wordlevel style relevance for better style transfer .", "particularly , we fine - tune this model using a carefully - designed objective function involving style transfer , style relevance consistency , content preservation and fluency modeling loss terms .", "experimental results show that our proposed model achieves state - of - the - art performance in terms of both transfer accuracy and content preservation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "unsupervised style transfer", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unsupervised", "style", "transfer"], "offsets": [0, 1, 2]}], 
"trigger": {"text": "change", "tokens": ["change"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "unable", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unable"], "offsets": [47]}, {"text": "yield", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["yield"], "offsets": [49]}], "trigger": {"text": "unable", "tokens": ["unable"], "offsets": [47]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [58]}, {"text": "unsupervised style transfer", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unsupervised", "style", "transfer"], "offsets": [86, 87, 88]}, {"text": "attentional sequence - to - sequence ( seq2seq ) model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["attentional", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [62, 63, 64, 65, 66, 67, 71]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [59]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [92]}, {"text": "style classifier", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["style", "classifier"], "offsets": [96, 97]}], "trigger": {"text": "pretrain", "tokens": ["pretrain"], "offsets": [94]}}, {"event_type": "MDS", "arguments": [{"text": "reconstruct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["reconstruct"], "offsets": [135]}, {"text": "repredict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["repredict"], "offsets": [139]}, {"text": "in a denoising auto - encoding manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "denoising", "auto", "-", "encoding", "manner"], "offsets": [120, 121, 122, 123, 124, 125, 126]}, {"text": "attentional seq2seq model", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["attentional", "seq2seq", "model"], 
"offsets": [131, 132, 133]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [129]}}, {"event_type": "PUR", "arguments": [{"text": "input sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["input", "sentences"], "offsets": [136, 137]}], "trigger": {"text": "reconstruct", "tokens": ["reconstruct"], "offsets": [135]}}, {"event_type": "PUR", "arguments": [{"text": "word - level previously - quantified style relevance simultaneously", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["word", "-", "level", "previously", "-", "quantified", "style", "relevance", "simultaneously"], "offsets": [140, 141, 142, 143, 144, 145, 146, 147, 148]}], "trigger": {"text": "repredict", "tokens": ["repredict"], "offsets": [139]}}, {"event_type": "RWS", "arguments": [{"text": "decoder of this model", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["decoder", "of", "this", "model"], "offsets": [177, 178, 179, 180]}, {"text": "neural style component", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["neural", "style", "component"], "offsets": [183, 184, 185]}, {"text": "exploit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["exploit"], "offsets": [187]}], "trigger": {"text": "equip", "tokens": ["equip"], "offsets": [175]}}, {"event_type": "PUR", "arguments": [{"text": "predicted wordlevel style relevance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["predicted", "wordlevel", "style", "relevance"], "offsets": [189, 190, 191, 192]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [187]}}, {"event_type": "MDS", "arguments": [{"text": "fine - tune", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fine", "-", "tune"], "offsets": [201, 202, 203]}, {"text": "carefully - designed objective function", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["carefully", "-", "designed", "objective", "function"], "offsets": [208, 209, 210, 211, 
212]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [206]}}, {"event_type": "PUR", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["model"], "offsets": [205]}], "trigger": {"text": "fine - tune", "tokens": ["fine", "-", "tune"], "offsets": [201, 202, 203]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [236]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [231]}}, {"event_type": "FAC", "arguments": [{"text": "attentional sequence - to - sequence ( seq2seq ) model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["attentional", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [62, 63, 64, 65, 66, 67, 71]}, {"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [237, 238, 239, 240, 241, 242, 243, 244]}, {"text": "transfer accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transfer", "accuracy"], "offsets": [249, 250]}, {"text": "content preservation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["content", "preservation"], "offsets": [252, 253]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [236]}}, {"event_type": "FAC", "arguments": [{"text": "attentional sequence - to - sequence ( seq2seq ) model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["attentional", "sequence", "-", "to", "-", "sequence", "model"], "offsets": [62, 63, 64, 65, 66, 67, 71]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [237, 238, 239, 240, 241, 242, 243, 244]}, {"text": "transfer accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["transfer", "accuracy"], 
"offsets": [249, 250]}, {"text": "content preservation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["content", "preservation"], "offsets": [252, 253]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [236]}}, {"event_type": "PUR", "arguments": [{"text": "desirable output sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["desirable", "output", "sentences"], "offsets": [50, 51, 52]}], "trigger": {"text": "yield", "tokens": ["yield"], "offsets": [49]}}], "document": ["unsupervised", "style", "transfer", "aims", "to", "change", "the", "style", "of", "an", "input", "sentence", "while", "preserving", "its", "original", "content", "without", "using", "parallel", "training", "data", ".", "in", "current", "dominant", "approaches", ",", "owing", "to", "the", "lack", "of", "fine", "-", "grained", "control", "on", "the", "influence", "from", "the", "target", "style", ",", "they", "are", "unable", "to", "yield", "desirable", "output", "sentences", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "attentional", "sequence", "-", "to", "-", "sequence", "(", "seq2seq", ")", "model", "that", "dynamically", "exploits", "the", "relevance", "of", "each", "output", "word", "to", "the", "target", "style", "for", "unsupervised", "style", "transfer", ".", "specifically", ",", "we", "first", "pretrain", "a", "style", "classifier", ",", "where", "the", "relevance", "of", "each", "input", "word", "to", "the", "original", "style", "can", "be", "quantified", "via", "layer", "-", "wise", "relevance", "propagation", ".", "in", "a", "denoising", "auto", "-", "encoding", "manner", ",", "we", "train", "an", "attentional", "seq2seq", "model", "to", "reconstruct", "input", "sentences", "and", "repredict", "word", "-", "level", "previously", "-", "quantified", "style", "relevance", "simultaneously", ".", "in", "this", "way", ",", "this", "model", "is", "endowed", "with", "the", "ability", "to", "automatically", "predict", "the", 
"style", "relevance", "of", "each", "output", "word", ".", "then", ",", "we", "equip", "the", "decoder", "of", "this", "model", "with", "a", "neural", "style", "component", "to", "exploit", "the", "predicted", "wordlevel", "style", "relevance", "for", "better", "style", "transfer", ".", "particularly", ",", "we", "fine", "-", "tune", "this", "model", "using", "a", "carefully", "-", "designed", "objective", "function", "involving", "style", "transfer", ",", "style", "relevance", "consistency", ",", "content", "preservation", "and", "fluency", "modeling", "loss", "terms", ".", "experimental", "results", "show", "that", "our", "proposed", "model", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "terms", "of", "both", "transfer", "accuracy", "and", "content", "preservation", "."]}, {"venue": "ACL", "title": "An Online Semantic-enhanced Dirichlet Model for Short Text Stream Clustering", "abstract": "Clustering short text streams is a challenging task due to its unique properties: infinite length, sparse data representation and cluster evolution. Existing approaches often exploit short text streams in a batch way. However, determine the optimal batch size is usually a difficult task since we have no priori knowledge when the topics evolve. In addition, traditional independent word representation in graphical model tends to cause \u201cterm ambiguity\u201d problem in short text clustering. Therefore, in this paper, we propose an Online Semantic-enhanced Dirichlet Model for short sext stream clustering, called OSDM, which integrates the word-occurance semantic information (i.e., context) into a new graphical model and clusters each arriving short text automatically in an online way. 
Extensive results have demonstrated that OSDM has better performance compared to many state-of-the-art algorithms on both synthetic and real-world data sets.", "doc_id": "8ef4fcb6b8d058bab59221fb633526ee", "publication_year": 2020, "sentences": ["clustering short text streams is a challenging task due to its unique properties : infinite length , sparse data representation and cluster evolution .", "existing approaches often exploit short text streams in a batch way .", "however , determine the optimal batch size is usually a difficult task since we have no priori knowledge when the topics evolve .", "in addition , traditional independent word representation in graphical model tends to cause \u201c term ambiguity \u201d problem in short text clustering .", "therefore , in this paper , we propose an online semantic - enhanced dirichlet model for short sext stream clustering , called osdm , which integrates the word - occurance semantic information ( i . e . , context ) into a new graphical model and clusters each arriving short text automatically in an online way .", "extensive results have demonstrated that osdm has better performance compared to many state - of - the - art algorithms on both synthetic and real - world data sets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "clustering short text streams", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["clustering", "short", "text", "streams"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [7]}}, {"event_type": "RWS", "arguments": [{"text": "existing approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "approaches"], "offsets": [24, 25]}, {"text": "short text streams", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["short", "text", "streams"], "offsets": [28, 29, 30]}, {"text": "in a batch way", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "batch", "way"], "offsets": 
[31, 32, 33, 34]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [27]}}, {"event_type": "RWF", "arguments": [{"text": "when the topics evolve", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "the", "topics", "evolve"], "offsets": [54, 55, 56, 57]}, {"text": "no priori knowledge", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["no", "priori", "knowledge"], "offsets": [51, 52, 53]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [50]}}, {"event_type": "RWF", "arguments": [{"text": "traditional independent word representation in graphical model", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["traditional", "independent", "word", "representation", "in", "graphical", "model"], "offsets": [62, 63, 64, 65, 66, 67, 68]}, {"text": "term ambiguity", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["term", "ambiguity"], "offsets": [73, 74]}], "trigger": {"text": "cause", "tokens": ["cause"], "offsets": [71]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [88]}, {"text": "online semantic - enhanced dirichlet model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["online", "semantic", "-", "enhanced", "dirichlet", "model"], "offsets": [91, 92, 93, 94, 95, 96]}, {"text": "short sext stream clustering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "sext", "stream", "clustering"], "offsets": [98, 99, 100, 101]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [89]}}, {"event_type": "MDS", "arguments": [{"text": "word - occurance semantic information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["word", "-", "occurance", "semantic", "information"], "offsets": [109, 110, 111, 112, 113]}, {"text": "new graphical model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["new", "graphical", "model"], "offsets": [124, 125, 
126]}], "trigger": {"text": "integrates", "tokens": ["integrates"], "offsets": [107]}}, {"event_type": "MDS", "arguments": [{"text": "each arriving short text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "arriving", "short", "text"], "offsets": [129, 130, 131, 132]}, {"text": "in an online way", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "online", "way"], "offsets": [134, 135, 136, 137]}], "trigger": {"text": "clusters", "tokens": ["clusters"], "offsets": [128]}}, {"event_type": "FIN", "arguments": [{"text": "has", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["has"], "offsets": [145]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [142]}}, {"event_type": "CMP", "arguments": [{"text": "online semantic - enhanced dirichlet model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["online", "semantic", "-", "enhanced", "dirichlet", "model"], "offsets": [91, 92, 93, 94, 95, 96]}, {"text": "many state - of - the - art algorithms", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["many", "state", "-", "of", "-", "the", "-", "art", "algorithms"], "offsets": [150, 151, 152, 153, 154, 155, 156, 157, 158]}, {"text": "synthetic and real - world data sets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetic", "and", "real", "-", "world", "data", "sets"], "offsets": [161, 162, 163, 164, 165, 166, 167]}, {"text": "better performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "performance"], "offsets": [146, 147]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [145]}}], "document": ["clustering", "short", "text", "streams", "is", "a", "challenging", "task", "due", "to", "its", "unique", "properties", ":", "infinite", "length", ",", "sparse", "data", "representation", "and", "cluster", "evolution", ".", "existing", "approaches", "often", "exploit", "short", "text", "streams", "in", "a", 
"batch", "way", ".", "however", ",", "determine", "the", "optimal", "batch", "size", "is", "usually", "a", "difficult", "task", "since", "we", "have", "no", "priori", "knowledge", "when", "the", "topics", "evolve", ".", "in", "addition", ",", "traditional", "independent", "word", "representation", "in", "graphical", "model", "tends", "to", "cause", "\u201c", "term", "ambiguity", "\u201d", "problem", "in", "short", "text", "clustering", ".", "therefore", ",", "in", "this", "paper", ",", "we", "propose", "an", "online", "semantic", "-", "enhanced", "dirichlet", "model", "for", "short", "sext", "stream", "clustering", ",", "called", "osdm", ",", "which", "integrates", "the", "word", "-", "occurance", "semantic", "information", "(", "i", ".", "e", ".", ",", "context", ")", "into", "a", "new", "graphical", "model", "and", "clusters", "each", "arriving", "short", "text", "automatically", "in", "an", "online", "way", ".", "extensive", "results", "have", "demonstrated", "that", "osdm", "has", "better", "performance", "compared", "to", "many", "state", "-", "of", "-", "the", "-", "art", "algorithms", "on", "both", "synthetic", "and", "real", "-", "world", "data", "sets", "."]}, {"venue": "ACL", "title": "Ensuring Readability and Data-fidelity using Head-modifier Templates in Deep Type Description Generation", "abstract": "A type description is a succinct noun compound which helps human and machines to quickly grasp the informative and distinctive information of an entity. Entities in most knowledge graphs (KGs) still lack such descriptions, thus calling for automatic methods to supplement such information. However, existing generative methods either overlook the grammatical structure or make factual mistakes in generated texts. To solve these problems, we propose a head-modifier template based method to ensure the readability and data fidelity of generated type descriptions. We also propose a new dataset and two metrics for this task. 
Experiments show that our method improves substantially compared with baselines and achieves state-of-the-art performance on both datasets.", "doc_id": "8f972fa85b7f21959493006c1561db72", "publication_year": 2019, "sentences": ["a type description is a succinct noun compound which helps human and machines to quickly grasp the informative and distinctive information of an entity .", "entities in most knowledge graphs ( kgs ) still lack such descriptions , thus calling for automatic methods to supplement such information .", "however , existing generative methods either overlook the grammatical structure or make factual mistakes in generated texts .", "to solve these problems , we propose a head - modifier template based method to ensure the readability and data fidelity of generated type descriptions .", "we also propose a new dataset and two metrics for this task .", "experiments show that our method improves substantially compared with baselines and achieves state - of - the - art performance on both datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "type description", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["type", "description"], "offsets": [1, 2]}], "trigger": {"text": "compound", "tokens": ["compound"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "existing generative methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "generative", "methods"], "offsets": [50, 51, 52]}, {"text": "grammatical structure", "nugget_type": "MOD", "argument_type": "Fault", "tokens": ["grammatical", "structure"], "offsets": [56, 57]}], "trigger": {"text": "overlook", "tokens": ["overlook"], "offsets": [54]}}, {"event_type": "RWF", "arguments": [{"text": "existing generative methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "generative", "methods"], "offsets": [50, 51, 52]}, {"text": "factual mistakes", "nugget_type": "WEA", "argument_type": "Fault", "tokens": 
["factual", "mistakes"], "offsets": [60, 61]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [71]}, {"text": "head - modifier template based method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["head", "-", "modifier", "template", "based", "method"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "ensure", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ensure"], "offsets": [81]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [72]}}, {"event_type": "PUR", "arguments": [{"text": "readability of generated type descriptions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["readability", "of", "generated", "type", "descriptions"], "offsets": [83, 87, 88, 89, 90]}, {"text": "data fidelity of generated type descriptions", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["data", "fidelity", "of", "generated", "type", "descriptions"], "offsets": [85, 86, 87, 88, 89, 90]}], "trigger": {"text": "ensure", "tokens": ["ensure"], "offsets": [81]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "new dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["new", "dataset"], "offsets": [96, 97]}, {"text": "two metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["two", "metrics"], "offsets": [99, 100]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [94]}}, {"event_type": "FIN", "arguments": [{"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [110]}, {"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [116]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [106]}}, {"event_type": "CMP", 
"arguments": [{"text": "head - modifier template based method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["head", "-", "modifier", "template", "based", "method"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [110]}, {"text": "substantially", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["substantially"], "offsets": [111]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [110]}}, {"event_type": "FAC", "arguments": [{"text": "head - modifier template based method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["head", "-", "modifier", "template", "based", "method"], "offsets": [74, 75, 76, 77, 78, 79]}, {"text": "state - of - the - art performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "both datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["both", "datasets"], "offsets": [126, 127]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [116]}}], "document": ["a", "type", "description", "is", "a", "succinct", "noun", "compound", "which", "helps", "human", "and", "machines", "to", "quickly", "grasp", "the", "informative", "and", "distinctive", "information", "of", "an", "entity", ".", "entities", "in", "most", "knowledge", "graphs", "(", "kgs", ")", "still", "lack", "such", "descriptions", ",", "thus", "calling", "for", "automatic", "methods", "to", "supplement", "such", "information", ".", "however", ",", "existing", "generative", "methods", "either", "overlook", "the", "grammatical", "structure", "or", "make", "factual", "mistakes", "in", "generated", "texts", ".", "to", "solve", "these", "problems", ",", "we", "propose", "a", "head", "-", "modifier", "template", "based", "method", "to", "ensure", "the", "readability", "and", 
"data", "fidelity", "of", "generated", "type", "descriptions", ".", "we", "also", "propose", "a", "new", "dataset", "and", "two", "metrics", "for", "this", "task", ".", "experiments", "show", "that", "our", "method", "improves", "substantially", "compared", "with", "baselines", "and", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "both", "datasets", "."]}, {"venue": "ACL", "title": "Efficient Cluster-Based k-Nearest-Neighbor Machine Translation", "abstract": "k-Nearest-Neighbor Machine Translation (kNN-MT) has been recently proposed as a non-parametric solution for domain adaptation in neural machine translation (NMT). It aims to alleviate the performance degradation of advanced MT systems in translating out-of-domain sentences by coordinating with an additional token-level feature-based retrieval module constructed from in-domain data. Previous studies (Khandelwal et al., 2021; Zheng et al., 2021) have already demonstrated that non-parametric NMT is even superior to models fine-tuned on out-of-domain data. In spite of this success, kNN retrieval is at the expense of high latency, in particular for large datastores. To make it practical, in this paper, we explore a more efficient kNN-MT and propose to use clustering to improve the retrieval efficiency. Concretely, we first propose a cluster-based Compact Network for feature reduction in a contrastive learning manner to compress context features into 90+% lower dimensional vectors. We then suggest a cluster-based pruning solution to filter out 10% 40% redundant nodes in large datastores while retaining translation quality. Our proposed methods achieve better or comparable performance while reducing up to 57% inference latency against the advanced non-parametric MT model on several machine translation benchmarks. 
Experimental results indicate that the proposed methods maintain the most useful information of the original datastore and the Compact Network shows good generalization on unseen domains. Codes are available at https://github.com/tjunlp-lab/PCKMT.", "doc_id": "0fb2da05a8577228cd3bd333d961b8e4", "publication_year": 2022, "sentences": ["k - nearest - neighbor machine translation ( knn - mt ) has been recently proposed as a non - parametric solution for domain adaptation in neural machine translation ( nmt ) .", "it aims to alleviate the performance degradation of advanced mt systems in translating out - of - domain sentences by coordinating with an additional token - level feature - based retrieval module constructed from in - domain data .", "previous studies ( khandelwal et al . , 2021 ; zheng et al . , 2021 ) have already demonstrated that non - parametric nmt is even superior to models fine - tuned on out - of - domain data .", "in spite of this success , knn retrieval is at the expense of high latency , in particular for large datastores .", "to make it practical , in this paper , we explore a more efficient knn - mt and propose to use clustering to improve the retrieval efficiency .", "concretely , we first propose a cluster - based compact network for feature reduction in a contrastive learning manner to compress context features into 90 + % lower dimensional vectors .", "we then suggest a cluster - based pruning solution to filter out 10 % 40 % redundant nodes in large datastores while retaining translation quality .", "our proposed methods achieve better or comparable performance while reducing up to 57 % inference latency against the advanced non - parametric mt model on several machine translation benchmarks .", "experimental results indicate that the proposed methods maintain the most useful information of the original datastore and the compact network shows good generalization on unseen domains .", "codes are available at https : / / github . 
com / tjunlp - lab / pckmt ."], "events": [{"event_type": "ITT", "arguments": [{"text": "k - nearest - neighbor machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["k", "-", "nearest", "-", "neighbor", "machine", "translation"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [15]}}, {"event_type": "RWS", "arguments": [{"text": "additional token - level feature - based retrieval module", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["additional", "token", "-", "level", "feature", "-", "based", "retrieval", "module"], "offsets": [56, 57, 58, 59, 60, 61, 62, 63, 64]}, {"text": "in - domain data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["in", "-", "domain", "data"], "offsets": [67, 68, 69, 70]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [36]}], "trigger": {"text": "constructed", "tokens": ["constructed"], "offsets": [65]}}, {"event_type": "RWF", "arguments": [{"text": "large datastores", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["large", "datastores"], "offsets": [132, 133]}, {"text": "knn retrieval", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["k", "-", "nearest", "-", "neighbor", "retrieval"], "offsets": [0, 1, 2, 3, 4, 120]}], "trigger": {"text": "expense of high latency", "tokens": ["expense", "of", "high", "latency"], "offsets": [124, 125, 126, 127]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [144]}, {"text": "more efficient knn - mt", "nugget_type": "APP", "argument_type": "Content", "tokens": ["more", "efficient", "k", "-", "nearest", "-", "neighbor", "machine", "translation"], "offsets": [147, 148, 0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [145]}}, {"event_type": "WKS", "arguments": [{"text": 
"clustering", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["clustering"], "offsets": [156]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [158]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [155]}}, {"event_type": "PUR", "arguments": [{"text": "retrieval efficiency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["retrieval", "efficiency"], "offsets": [160, 161]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [158]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [165]}, {"text": "cluster - based compact network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["cluster", "-", "based", "compact", "network"], "offsets": [169, 170, 171, 172, 173]}, {"text": "feature reduction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["feature", "reduction"], "offsets": [175, 176]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [167]}}, {"event_type": "MDS", "arguments": [{"text": "90 + % lower dimensional vectors", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["90", "+", "%", "lower", "dimensional", "vectors"], "offsets": [187, 188, 189, 190, 191, 192]}, {"text": "context features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["context", "features"], "offsets": [184, 185]}], "trigger": {"text": "compress", "tokens": ["compress"], "offsets": [183]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [194]}, {"text": "cluster - based pruning solution", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["cluster", "-", "based", "pruning", "solution"], "offsets": [198, 199, 200, 201, 202]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [196]}}, {"event_type": "MDS", "arguments": 
[{"text": "10 % 40 % redundant nodes", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["10", "%", "40", "%", "redundant", "nodes"], "offsets": [206, 207, 208, 209, 210, 211]}, {"text": "large datastores", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["large", "datastores"], "offsets": [213, 214]}, {"text": "while retaining translation quality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "retaining", "translation", "quality"], "offsets": [215, 216, 217, 218]}], "trigger": {"text": "filter", "tokens": ["filter"], "offsets": [204]}}, {"event_type": "CMP", "arguments": [{"text": "cluster - based compact network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cluster", "-", "based", "compact", "network"], "offsets": [169, 170, 171, 172, 173]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [224]}, {"text": "comparable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable"], "offsets": [226]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [227]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [223]}}, {"event_type": "CMP", "arguments": [{"text": "cluster - based compact network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["cluster", "-", "based", "compact", "network"], "offsets": [169, 170, 171, 172, 173]}, {"text": "57 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["57", "%"], "offsets": [232, 233]}, {"text": "inference latency", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["inference", "latency"], "offsets": [234, 235]}, {"text": "advanced non - parametric mt model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["advanced", "non", "-", "parametric", "machine", "translation", "model"], "offsets": [238, 239, 240, 241, 5, 6, 243]}, {"text": "on several machine translation 
benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "machine", "translation", "benchmarks"], "offsets": [244, 245, 246, 247, 248]}], "trigger": {"text": "reducing", "tokens": ["reducing"], "offsets": [229]}}, {"event_type": "FIN", "arguments": [{"text": "maintain", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["maintain"], "offsets": [257]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [252]}}, {"event_type": "FAC", "arguments": [{"text": "cluster - based compact network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cluster", "-", "based", "compact", "network"], "offsets": [169, 170, 171, 172, 173]}, {"text": "most", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["most"], "offsets": [259]}, {"text": "useful information of the original datastore", "nugget_type": "STR", "argument_type": "Object", "tokens": ["useful", "information", "of", "the", "original", "datastore"], "offsets": [260, 261, 262, 263, 264, 265]}], "trigger": {"text": "maintain", "tokens": ["maintain"], "offsets": [257]}}, {"event_type": "FIN", "arguments": [{"text": "shows", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["shows"], "offsets": [270]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [252]}}, {"event_type": "FAC", "arguments": [{"text": "cluster - based compact network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cluster", "-", "based", "compact", "network"], "offsets": [169, 170, 171, 172, 173]}, {"text": "good generalization", "nugget_type": "STR", "argument_type": "Object", "tokens": ["good", "generalization"], "offsets": [271, 272]}, {"text": "on unseen domains", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "unseen", "domains"], "offsets": [273, 274, 275]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [270]}}, {"event_type": "PUR", "arguments": [{"text": "performance degradation 
of advanced mt systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "degradation", "of", "advanced", "machine", "translation", "systems"], "offsets": [38, 39, 40, 41, 5, 6, 43]}, {"text": "in translating out - of - domain sentences", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "translating", "out", "-", "of", "-", "domain", "sentences"], "offsets": [44, 45, 46, 47, 48, 49, 50, 51]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [36]}}], "document": ["k", "-", "nearest", "-", "neighbor", "machine", "translation", "(", "knn", "-", "mt", ")", "has", "been", "recently", "proposed", "as", "a", "non", "-", "parametric", "solution", "for", "domain", "adaptation", "in", "neural", "machine", "translation", "(", "nmt", ")", ".", "it", "aims", "to", "alleviate", "the", "performance", "degradation", "of", "advanced", "mt", "systems", "in", "translating", "out", "-", "of", "-", "domain", "sentences", "by", "coordinating", "with", "an", "additional", "token", "-", "level", "feature", "-", "based", "retrieval", "module", "constructed", "from", "in", "-", "domain", "data", ".", "previous", "studies", "(", "khandelwal", "et", "al", ".", ",", "2021", ";", "zheng", "et", "al", ".", ",", "2021", ")", "have", "already", "demonstrated", "that", "non", "-", "parametric", "nmt", "is", "even", "superior", "to", "models", "fine", "-", "tuned", "on", "out", "-", "of", "-", "domain", "data", ".", "in", "spite", "of", "this", "success", ",", "knn", "retrieval", "is", "at", "the", "expense", "of", "high", "latency", ",", "in", "particular", "for", "large", "datastores", ".", "to", "make", "it", "practical", ",", "in", "this", "paper", ",", "we", "explore", "a", "more", "efficient", "knn", "-", "mt", "and", "propose", "to", "use", "clustering", "to", "improve", "the", "retrieval", "efficiency", ".", "concretely", ",", "we", "first", "propose", "a", "cluster", "-", "based", "compact", "network", "for", "feature", 
"reduction", "in", "a", "contrastive", "learning", "manner", "to", "compress", "context", "features", "into", "90", "+", "%", "lower", "dimensional", "vectors", ".", "we", "then", "suggest", "a", "cluster", "-", "based", "pruning", "solution", "to", "filter", "out", "10", "%", "40", "%", "redundant", "nodes", "in", "large", "datastores", "while", "retaining", "translation", "quality", ".", "our", "proposed", "methods", "achieve", "better", "or", "comparable", "performance", "while", "reducing", "up", "to", "57", "%", "inference", "latency", "against", "the", "advanced", "non", "-", "parametric", "mt", "model", "on", "several", "machine", "translation", "benchmarks", ".", "experimental", "results", "indicate", "that", "the", "proposed", "methods", "maintain", "the", "most", "useful", "information", "of", "the", "original", "datastore", "and", "the", "compact", "network", "shows", "good", "generalization", "on", "unseen", "domains", ".", "codes", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "tjunlp", "-", "lab", "/", "pckmt", "."]}, {"venue": "ACL", "title": "How Helpful is Inverse Reinforcement Learning for Table-to-Text Generation?", "abstract": "Existing approaches for the Table-to-Text task suffer from issues such as missing information, hallucination and repetition. Many approaches to this problem use Reinforcement Learning (RL), which maximizes a single manually defined reward, such as BLEU. In this work, we instead pose the Table-to-Text task as Inverse Reinforcement Learning (IRL) problem. We explore using multiple interpretable unsupervised reward components that are combined linearly to form a composite reward function. The composite reward function and the description generator are learned jointly. We find that IRL outperforms strong RL baselines marginally. We further study the generalization of learned IRL rewards in scenarios involving domain adaptation. 
Our experiments reveal significant challenges in using IRL for this task.", "doc_id": "54dde4f9c065f4b52e4a45080d46cf79", "publication_year": 2021, "sentences": ["existing approaches for the table - to - text task suffer from issues such as missing information , hallucination and repetition .", "many approaches to this problem use reinforcement learning ( rl ) , which maximizes a single manually defined reward , such as bleu .", "in this work , we instead pose the table - to - text task as inverse reinforcement learning ( irl ) problem .", "we explore using multiple interpretable unsupervised reward components that are combined linearly to form a composite reward function .", "the composite reward function and the description generator are learned jointly .", "we find that irl outperforms strong rl baselines marginally .", "we further study the generalization of learned irl rewards in scenarios involving domain adaptation .", "our experiments reveal significant challenges in using irl for this task ."], "events": [{"event_type": "RWF", "arguments": [{"text": "approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["approaches"], "offsets": [1]}, {"text": "the table - to - text task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["the", "table", "-", "to", "-", "text", "task"], "offsets": [3, 4, 5, 6, 7, 8, 9]}, {"text": "missing information", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["missing", "information"], "offsets": [15, 16]}, {"text": "hallucination", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hallucination"], "offsets": [18]}, {"text": "repetition", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["repetition"], "offsets": [20]}], "trigger": {"text": "suffer from", "tokens": ["suffer", "from"], "offsets": [10, 11]}}, {"event_type": "RWS", "arguments": [{"text": "reinforcement learning", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["reinforcement", "learning"], 
"offsets": [28, 29]}, {"text": "approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approaches"], "offsets": [23]}, {"text": "missing information , hallucination and repetition", "nugget_type": "WEA", "argument_type": "Target", "tokens": ["missing", "information", ",", "hallucination", "and", "repetition"], "offsets": [15, 16, 17, 18, 19, 20]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [27]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [50]}, {"text": "inverse reinforcement learning ( irl ) problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["inverse", "reinforcement", "learning", "problem"], "offsets": [61, 62, 63, 67]}], "trigger": {"text": "pose", "tokens": ["pose"], "offsets": [52]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [69]}, {"text": "multiple interpretable unsupervised reward components", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["multiple", "interpretable", "unsupervised", "reward", "components"], "offsets": [72, 73, 74, 75, 76]}, {"text": "combined linearly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["combined", "linearly"], "offsets": [79, 80]}, {"text": "form", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["form"], "offsets": [82]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [71]}}, {"event_type": "WKS", "arguments": [{"text": "composite reward function", "nugget_type": "APP", "argument_type": "Content", "tokens": ["composite", "reward", "function"], "offsets": [89, 90, 91]}, {"text": "description generator", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["description", "generator"], "offsets": [94, 95]}, {"text": "jointly", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["jointly"], "offsets": [98]}], "trigger": 
{"text": "learned", "tokens": ["learned"], "offsets": [97]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [100]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [104]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [101]}}, {"event_type": "CMP", "arguments": [{"text": "strong", "nugget_type": "STR", "argument_type": "Result", "tokens": ["strong"], "offsets": [105]}, {"text": "inverse reinforcement learning", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["inverse", "reinforcement", "learning"], "offsets": [61, 62, 63]}, {"text": "reinforcement learning", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["reinforcement", "learning"], "offsets": [28, 29]}, {"text": "baselines marginally", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["baselines", "marginally"], "offsets": [107, 108]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [104]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [110]}, {"text": "in scenarios involving domain adaptation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "scenarios", "involving", "domain", "adaptation"], "offsets": [119, 120, 121, 122, 123]}, {"text": "generalization of learned irl rewards", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["generalization", "of", "learned", "irl", "rewards"], "offsets": [114, 115, 116, 117, 118]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [112]}}, {"event_type": "FAC", "arguments": [{"text": "experiments", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["experiments"], "offsets": [126]}, {"text": "challenges", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["challenges"], "offsets": [129]}, {"text": "significant", 
"nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [128]}, {"text": "table - to - text task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["table", "-", "to", "-", "text", "task"], "offsets": [4, 5, 6, 7, 8, 9]}], "trigger": {"text": "reveal", "tokens": ["reveal"], "offsets": [127]}}, {"event_type": "PUR", "arguments": [{"text": "composite reward function", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["composite", "reward", "function"], "offsets": [84, 85, 86]}], "trigger": {"text": "form", "tokens": ["form"], "offsets": [82]}}], "document": ["existing", "approaches", "for", "the", "table", "-", "to", "-", "text", "task", "suffer", "from", "issues", "such", "as", "missing", "information", ",", "hallucination", "and", "repetition", ".", "many", "approaches", "to", "this", "problem", "use", "reinforcement", "learning", "(", "rl", ")", ",", "which", "maximizes", "a", "single", "manually", "defined", "reward", ",", "such", "as", "bleu", ".", "in", "this", "work", ",", "we", "instead", "pose", "the", "table", "-", "to", "-", "text", "task", "as", "inverse", "reinforcement", "learning", "(", "irl", ")", "problem", ".", "we", "explore", "using", "multiple", "interpretable", "unsupervised", "reward", "components", "that", "are", "combined", "linearly", "to", "form", "a", "composite", "reward", "function", ".", "the", "composite", "reward", "function", "and", "the", "description", "generator", "are", "learned", "jointly", ".", "we", "find", "that", "irl", "outperforms", "strong", "rl", "baselines", "marginally", ".", "we", "further", "study", "the", "generalization", "of", "learned", "irl", "rewards", "in", "scenarios", "involving", "domain", "adaptation", ".", "our", "experiments", "reveal", "significant", "challenges", "in", "using", "irl", "for", "this", "task", "."]}, {"venue": "ACL", "title": "On the Calibration of Pre-trained Language Models using Mixup Guided by Area Under the Margin and Saliency", 
"abstract": "A well-calibrated neural model produces confidence (probability outputs) closely approximated by the expected accuracy. While prior studies have shown that mixup training as a data augmentation technique can improve model calibration on image classification tasks, little is known about using mixup for model calibration on natural language understanding (NLU) tasks. In this paper, we explore mixup for model calibration on several NLU tasks and propose a novel mixup strategy for pre-trained language models that improves model calibration further. Our proposed mixup is guided by both the Area Under the Margin (AUM) statistic (Pleiss et al., 2020) and the saliency map of each sample (Simonyan et al., 2013). Moreover, we combine our mixup strategy with model miscalibration correction techniques (i.e., label smoothing and temperature scaling) and provide detailed analyses of their impact on our proposed mixup. We focus on systematically designing experiments on three NLU tasks: natural language inference, paraphrase detection, and commonsense reasoning. 
Our method achieves the lowest expected calibration error compared to strong baselines on both in-domain and out-of-domain test samples while maintaining competitive accuracy.", "doc_id": "efc282191cc4cdc446d41240ccc73fe7", "publication_year": 2022, "sentences": ["a well - calibrated neural model produces confidence ( probability outputs ) closely approximated by the expected accuracy .", "while prior studies have shown that mixup training as a data augmentation technique can improve model calibration on image classification tasks , little is known about using mixup for model calibration on natural language understanding ( nlu ) tasks .", "in this paper , we explore mixup for model calibration on several nlu tasks and propose a novel mixup strategy for pre - trained language models that improves model calibration further .", "our proposed mixup is guided by both the area under the margin ( aum ) statistic ( pleiss et al . , 2020 ) and the saliency map of each sample ( simonyan et al . , 2013 ) .", "moreover , we combine our mixup strategy with model miscalibration correction techniques ( i . e . 
, label smoothing and temperature scaling ) and provide detailed analyses of their impact on our proposed mixup .", "we focus on systematically designing experiments on three nlu tasks : natural language inference , paraphrase detection , and commonsense reasoning .", "our method achieves the lowest expected calibration error compared to strong baselines on both in - domain and out - of - domain test samples while maintaining competitive accuracy ."], "events": [{"event_type": "ITT", "arguments": [{"text": "mixup training", "nugget_type": "APP", "argument_type": "Target", "tokens": ["mixup", "training"], "offsets": [25, 26]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [63]}, {"text": "mixup", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixup"], "offsets": [65]}, {"text": "model calibration", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["model", "calibration"], "offsets": [67, 68]}, {"text": "on several nlu tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "several", "nlu", "tasks"], "offsets": [69, 70, 71, 72]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [64]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [63]}, {"text": "mixup strategy for pre - trained language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixup", "strategy", "for", "pre", "-", "trained", "language", "models"], "offsets": [77, 78, 79, 80, 81, 82, 83, 84]}, {"text": "improves", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improves"], "offsets": [86]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "model calibration", "nugget_type": "TAK", "argument_type": 
"Aim", "tokens": ["model", "calibration"], "offsets": [87, 88]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [86]}}, {"event_type": "MDS", "arguments": [{"text": "area under the margin ( aum ) statistic", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["area", "under", "the", "margin", "statistic"], "offsets": [99, 100, 101, 102, 106]}, {"text": "saliency map of each sample", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["saliency", "map", "of", "each", "sample"], "offsets": [117, 118, 119, 120, 121]}], "trigger": {"text": "guided", "tokens": ["guided"], "offsets": [95]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [133]}, {"text": "mixup strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["mixup", "strategy"], "offsets": [136, 137]}, {"text": "model miscalibration correction techniques", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "miscalibration", "correction", "techniques"], "offsets": [139, 140, 141, 142]}], "trigger": {"text": "combine", "tokens": ["combine"], "offsets": [134]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [133]}, {"text": "detailed analyses of their impact", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["detailed", "analyses", "of", "model", "miscalibration", "correction", "techniques", "impact"], "offsets": [157, 158, 159, 139, 140, 141, 142, 161]}, {"text": "on our proposed mixup", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "our", "proposed", "mixup"], "offsets": [162, 163, 164, 165]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [156]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [167]}, {"text": 
"systematically", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["systematically"], "offsets": [170]}, {"text": "experiments", "nugget_type": "APP", "argument_type": "Content", "tokens": ["experiments"], "offsets": [172]}, {"text": "natural language inference , paraphrase detection , and commonsense reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "inference", ",", "paraphrase", "detection", ",", "and", "commonsense", "reasoning"], "offsets": [178, 179, 180, 181, 182, 183, 184, 185, 186, 187]}], "trigger": {"text": "designing", "tokens": ["designing"], "offsets": [171]}}, {"event_type": "CMP", "arguments": [{"text": "strong baselines on both in - domain and out - of - domain test samples", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines", "on", "both", "in", "-", "domain", "and", "out", "-", "of", "-", "domain", "test", "samples"], "offsets": [199, 200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211, 212, 213]}, {"text": "method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method"], "offsets": [190]}, {"text": "lowest", "nugget_type": "STR", "argument_type": "Result", "tokens": ["lowest"], "offsets": [193]}, {"text": "expected calibration error", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["expected", "calibration", "error"], "offsets": [194, 195, 196]}, {"text": "competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive"], "offsets": [216]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["accuracy"], "offsets": [217]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [191]}}], "document": ["a", "well", "-", "calibrated", "neural", "model", "produces", "confidence", "(", "probability", "outputs", ")", "closely", "approximated", "by", "the", "expected", "accuracy", ".", "while", "prior", "studies", "have", "shown", "that", "mixup", "training", "as", "a", 
"data", "augmentation", "technique", "can", "improve", "model", "calibration", "on", "image", "classification", "tasks", ",", "little", "is", "known", "about", "using", "mixup", "for", "model", "calibration", "on", "natural", "language", "understanding", "(", "nlu", ")", "tasks", ".", "in", "this", "paper", ",", "we", "explore", "mixup", "for", "model", "calibration", "on", "several", "nlu", "tasks", "and", "propose", "a", "novel", "mixup", "strategy", "for", "pre", "-", "trained", "language", "models", "that", "improves", "model", "calibration", "further", ".", "our", "proposed", "mixup", "is", "guided", "by", "both", "the", "area", "under", "the", "margin", "(", "aum", ")", "statistic", "(", "pleiss", "et", "al", ".", ",", "2020", ")", "and", "the", "saliency", "map", "of", "each", "sample", "(", "simonyan", "et", "al", ".", ",", "2013", ")", ".", "moreover", ",", "we", "combine", "our", "mixup", "strategy", "with", "model", "miscalibration", "correction", "techniques", "(", "i", ".", "e", ".", ",", "label", "smoothing", "and", "temperature", "scaling", ")", "and", "provide", "detailed", "analyses", "of", "their", "impact", "on", "our", "proposed", "mixup", ".", "we", "focus", "on", "systematically", "designing", "experiments", "on", "three", "nlu", "tasks", ":", "natural", "language", "inference", ",", "paraphrase", "detection", ",", "and", "commonsense", "reasoning", ".", "our", "method", "achieves", "the", "lowest", "expected", "calibration", "error", "compared", "to", "strong", "baselines", "on", "both", "in", "-", "domain", "and", "out", "-", "of", "-", "domain", "test", "samples", "while", "maintaining", "competitive", "accuracy", "."]}, {"venue": "ACL", "title": "A Top-down Neural Architecture towards Text-level Parsing of Discourse Rhetorical Structure", "abstract": "Due to its great importance in deep natural language understanding and various down-stream applications, text-level parsing of discourse rhetorical structure (DRS) has been drawing more and 
more attention in recent years. However, all the previous studies on text-level discourse parsing adopt bottom-up approaches, which much limit the DRS determination on local information and fail to well benefit from global information of the overall discourse. In this paper, we justify from both computational and perceptive points-of-view that the top-down architecture is more suitable for text-level DRS parsing. On the basis, we propose a top-down neural architecture toward text-level DRS parsing. In particular, we cast discourse parsing as a recursive split point ranking task, where a split point is classified to different levels according to its rank and the elementary discourse units (EDUs) associated with it are arranged accordingly. In this way, we can determine the complete DRS as a hierarchical tree structure via an encoder-decoder with an internal stack. Experimentation on both the English RST-DT corpus and the Chinese CDTB corpus shows the great effectiveness of our proposed top-down approach towards text-level DRS parsing.", "doc_id": "e14b2791db78babcd86294ff6557d2fc", "publication_year": 2020, "sentences": ["due to its great importance in deep natural language understanding and various down - stream applications , text - level parsing of discourse rhetorical structure ( drs ) has been drawing more and more attention in recent years .", "however , all the previous studies on text - level discourse parsing adopt bottom - up approaches , which much limit the drs determination on local information and fail to well benefit from global information of the overall discourse .", "in this paper , we justify from both computational and perceptive points - of - view that the top - down architecture is more suitable for text - level drs parsing .", "on the basis , we propose a top - down neural architecture toward text - level drs parsing .", "in particular , we cast discourse parsing as a recursive split point ranking task , where a split point is classified to 
different levels according to its rank and the elementary discourse units ( edus ) associated with it are arranged accordingly .", "in this way , we can determine the complete drs as a hierarchical tree structure via an encoder - decoder with an internal stack .", "experimentation on both the english rst - dt corpus and the chinese cdtb corpus shows the great effectiveness of our proposed top - down approach towards text - level drs parsing ."], "events": [{"event_type": "ITT", "arguments": [{"text": "text - level parsing", "nugget_type": "APP", "argument_type": "Target", "tokens": ["text", "-", "level", "parsing"], "offsets": [17, 18, 19, 20]}], "trigger": {"text": "drawing", "tokens": ["drawing"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "bottom - up approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["bottom", "-", "up", "approaches"], "offsets": [52, 53, 54, 55]}, {"text": "limit", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limit"], "offsets": [59]}], "trigger": {"text": "limit", "tokens": ["limit"], "offsets": [59]}}, {"event_type": "RWF", "arguments": [{"text": "bottom - up approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["bottom", "-", "up", "approaches"], "offsets": [52, 53, 54, 55]}, {"text": "fail", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fail"], "offsets": [67]}], "trigger": {"text": "fail", "tokens": ["fail"], "offsets": [67]}}, {"event_type": "WKS", "arguments": [{"text": "from both computational and perceptive points - of - view", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "both", "computational", "and", "perceptive", "points", "-", "of", "-", "view"], "offsets": [85, 86, 87, 88, 89, 90, 91, 92, 93, 94]}, {"text": "top - down architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["top", "-", "down", "architecture"], "offsets": [97, 98, 99, 100]}, {"text": "we", "nugget_type": "OG", "argument_type": 
"Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "text - level drs parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "-", "level", "drs", "parsing"], "offsets": [105, 106, 107, 108, 109]}], "trigger": {"text": "justify", "tokens": ["justify"], "offsets": [84]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [115]}, {"text": "top - down neural architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["top", "-", "down", "neural", "architecture"], "offsets": [118, 119, 120, 121, 122]}, {"text": "text - level drs parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "-", "level", "discourse", "rhetorical", "structure", "parsing"], "offsets": [124, 125, 126, 22, 23, 24, 128]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [116]}}, {"event_type": "MDS", "arguments": [{"text": "discourse parsing", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["discourse", "parsing"], "offsets": [135, 136]}, {"text": "recursive split point ranking task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["recursive", "split", "point", "ranking", "task"], "offsets": [139, 140, 141, 142, 143]}], "trigger": {"text": "cast", "tokens": ["cast"], "offsets": [134]}}, {"event_type": "MDS", "arguments": [{"text": "split point", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["split", "point"], "offsets": [147, 148]}, {"text": "according to its rank", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["according", "to", "split", "point", "rank"], "offsets": [154, 155, 147, 148, 157]}, {"text": "different levels", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["different", "levels"], "offsets": [152, 153]}], "trigger": {"text": "classified", "tokens": ["classified"], "offsets": [150]}}, {"event_type": "MDS", "arguments": [{"text": 
"elementary discourse units", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["elementary", "discourse", "units"], "offsets": [160, 161, 162]}], "trigger": {"text": "arranged", "tokens": ["arranged"], "offsets": [170]}}, {"event_type": "MDS", "arguments": [{"text": "encoder - decoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["encoder", "-", "decoder"], "offsets": [190, 191, 192]}, {"text": "hierarchical tree structure", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["hierarchical", "tree", "structure"], "offsets": [185, 186, 187]}, {"text": "complete drs", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["complete", "discourse", "rhetorical", "structure"], "offsets": [181, 22, 23, 24]}, {"text": "with an internal stack", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "an", "internal", "stack"], "offsets": [193, 194, 195, 196]}], "trigger": {"text": "determine", "tokens": ["determine"], "offsets": [179]}}, {"event_type": "FAC", "arguments": [{"text": "english rst - dt corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["english", "rst", "-", "dt", "corpus"], "offsets": [202, 203, 204, 205, 206]}, {"text": "chinese cdtb corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["chinese", "cdtb", "corpus"], "offsets": [209, 210, 211]}, {"text": "text - level drs parsing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "-", "level", "discourse", "rhetorical", "structure", "parsing"], "offsets": [224, 225, 226, 22, 23, 24, 228]}, {"text": "great effectiveness of our proposed top - down approach", "nugget_type": "STR", "argument_type": "Object", "tokens": ["great", "effectiveness", "of", "our", "proposed", "top", "-", "down", "approach"], "offsets": [214, 215, 216, 217, 218, 219, 220, 221, 222]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [212]}}, {"event_type": "PUR", "arguments": [{"text": "from 
global information of the overall discourse", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "global", "information", "of", "the", "overall", "discourse"], "offsets": [71, 72, 73, 74, 75, 76, 77]}], "trigger": {"text": "well benefit", "tokens": ["well", "benefit"], "offsets": [69, 70]}}], "document": ["due", "to", "its", "great", "importance", "in", "deep", "natural", "language", "understanding", "and", "various", "down", "-", "stream", "applications", ",", "text", "-", "level", "parsing", "of", "discourse", "rhetorical", "structure", "(", "drs", ")", "has", "been", "drawing", "more", "and", "more", "attention", "in", "recent", "years", ".", "however", ",", "all", "the", "previous", "studies", "on", "text", "-", "level", "discourse", "parsing", "adopt", "bottom", "-", "up", "approaches", ",", "which", "much", "limit", "the", "drs", "determination", "on", "local", "information", "and", "fail", "to", "well", "benefit", "from", "global", "information", "of", "the", "overall", "discourse", ".", "in", "this", "paper", ",", "we", "justify", "from", "both", "computational", "and", "perceptive", "points", "-", "of", "-", "view", "that", "the", "top", "-", "down", "architecture", "is", "more", "suitable", "for", "text", "-", "level", "drs", "parsing", ".", "on", "the", "basis", ",", "we", "propose", "a", "top", "-", "down", "neural", "architecture", "toward", "text", "-", "level", "drs", "parsing", ".", "in", "particular", ",", "we", "cast", "discourse", "parsing", "as", "a", "recursive", "split", "point", "ranking", "task", ",", "where", "a", "split", "point", "is", "classified", "to", "different", "levels", "according", "to", "its", "rank", "and", "the", "elementary", "discourse", "units", "(", "edus", ")", "associated", "with", "it", "are", "arranged", "accordingly", ".", "in", "this", "way", ",", "we", "can", "determine", "the", "complete", "drs", "as", "a", "hierarchical", "tree", "structure", "via", "an", "encoder", "-", "decoder", "with", 
"an", "internal", "stack", ".", "experimentation", "on", "both", "the", "english", "rst", "-", "dt", "corpus", "and", "the", "chinese", "cdtb", "corpus", "shows", "the", "great", "effectiveness", "of", "our", "proposed", "top", "-", "down", "approach", "towards", "text", "-", "level", "drs", "parsing", "."]}, {"venue": "ACL", "title": "SciNLI: A Corpus for Natural Language Inference on Scientific Text", "abstract": "Existing Natural Language Inference (NLI) datasets, while being instrumental in the advancement of Natural Language Understanding (NLU) research, are not related to scientific text. In this paper, we introduce SciNLI, a large dataset for NLI that captures the formality in scientific text and contains 107,412 sentence pairs extracted from scholarly papers on NLP and computational linguistics. Given that the text used in scientific literature differs vastly from the text used in everyday language both in terms of vocabulary and sentence structure, our dataset is well suited to serve as a benchmark for the evaluation of scientific NLU models. Our experiments show that SciNLI is harder to classify than the existing NLI datasets. 
Our best performing model with XLNet achieves a Macro F1 score of only 78.18% and an accuracy of 78.23% showing that there is substantial room for improvement.", "doc_id": "fcac34855f7d377f2fbd48370f3ac928", "publication_year": 2022, "sentences": ["existing natural language inference ( nli ) datasets , while being instrumental in the advancement of natural language understanding ( nlu ) research , are not related to scientific text .", "in this paper , we introduce scinli , a large dataset for nli that captures the formality in scientific text and contains 107 , 412 sentence pairs extracted from scholarly papers on nlp and computational linguistics .", "given that the text used in scientific literature differs vastly from the text used in everyday language both in terms of vocabulary and sentence structure , our dataset is well suited to serve as a benchmark for the evaluation of scientific nlu models .", "our experiments show that scinli is harder to classify than the existing nli datasets .", "our best performing model with xlnet achieves a macro f1 score of only 78 . 18 % and an accuracy of 78 . 
23 % showing that there is substantial room for improvement ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "understanding"], "offsets": [16, 17, 18]}], "trigger": {"text": "related", "tokens": ["related"], "offsets": [26]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [35]}, {"text": "large dataset for nli", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "dataset", "for", "natural", "language", "inference"], "offsets": [40, 41, 42, 1, 2, 3]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [36]}}, {"event_type": "FIN", "arguments": [{"text": "than", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["than"], "offsets": [121]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [114]}}, {"event_type": "CMP", "arguments": [{"text": "existing nli datasets", "nugget_type": "DST", "argument_type": "Arg2", "tokens": ["existing", "natural", "language", "inference", "datasets"], "offsets": [123, 1, 2, 3, 125]}, {"text": "scinli", "nugget_type": "DST", "argument_type": "Arg1", "tokens": ["scinli"], "offsets": [116]}, {"text": "harder to classify", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["harder", "to", "classify"], "offsets": [118, 119, 120]}], "trigger": {"text": "than", "tokens": ["than"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "best performing model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "performing", "model"], "offsets": [128, 129, 130]}, {"text": "78 . 
18 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["78", ".", "18", "%"], "offsets": [140, 141, 142, 143]}, {"text": "macro f1 score", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["macro", "f1", "score"], "offsets": [135, 136, 137]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [133]}}, {"event_type": "FAC", "arguments": [{"text": "best performing model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "performing", "model"], "offsets": [128, 129, 130]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["accuracy"], "offsets": [146]}, {"text": "78 . 23 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["78", ".", "23", "%"], "offsets": [148, 149, 150, 151]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [133]}}, {"event_type": "FAC", "arguments": [{"text": "substantial room for improvement", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["substantial", "room", "for", "improvement"], "offsets": [156, 157, 158, 159]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [152]}}], "document": ["existing", "natural", "language", "inference", "(", "nli", ")", "datasets", ",", "while", "being", "instrumental", "in", "the", "advancement", "of", "natural", "language", "understanding", "(", "nlu", ")", "research", ",", "are", "not", "related", "to", "scientific", "text", ".", "in", "this", "paper", ",", "we", "introduce", "scinli", ",", "a", "large", "dataset", "for", "nli", "that", "captures", "the", "formality", "in", "scientific", "text", "and", "contains", "107", ",", "412", "sentence", "pairs", "extracted", "from", "scholarly", "papers", "on", "nlp", "and", "computational", "linguistics", ".", "given", "that", "the", "text", "used", "in", "scientific", "literature", "differs", "vastly", "from", "the", "text", "used", "in", "everyday", "language", "both", "in", "terms", "of", "vocabulary", "and", 
"sentence", "structure", ",", "our", "dataset", "is", "well", "suited", "to", "serve", "as", "a", "benchmark", "for", "the", "evaluation", "of", "scientific", "nlu", "models", ".", "our", "experiments", "show", "that", "scinli", "is", "harder", "to", "classify", "than", "the", "existing", "nli", "datasets", ".", "our", "best", "performing", "model", "with", "xlnet", "achieves", "a", "macro", "f1", "score", "of", "only", "78", ".", "18", "%", "and", "an", "accuracy", "of", "78", ".", "23", "%", "showing", "that", "there", "is", "substantial", "room", "for", "improvement", "."]}, {"venue": "ACL", "title": "Neural Stylistic Response Generation with Disentangled Latent Variables", "abstract": "Generating open-domain conversational responses in the desired style usually suffers from the lack of parallel data in the style. Meanwhile, using monolingual stylistic data to increase style intensity often leads to the expense of decreasing content relevance. In this paper, we propose to disentangle the content and style in latent space by diluting sentence-level information in style representations. Combining the desired style representation and a response content representation will then obtain a stylistic response. Our approach achieves a higher BERT-based style intensity score and comparable BLEU scores, compared with baselines. 
Human evaluation results show that our approach significantly improves style intensity and maintains content relevance.", "doc_id": "b04949b29e46d02b9d1ab600ab2f28f6", "publication_year": 2021, "sentences": ["generating open - domain conversational responses in the desired style usually suffers from the lack of parallel data in the style .", "meanwhile , using monolingual stylistic data to increase style intensity often leads to the expense of decreasing content relevance .", "in this paper , we propose to disentangle the content and style in latent space by diluting sentence - level information in style representations .", "combining the desired style representation and a response content representation will then obtain a stylistic response .", "our approach achieves a higher bert - based style intensity score and comparable bleu scores , compared with baselines .", "human evaluation results show that our approach significantly improves style intensity and maintains content relevance ."], "events": [{"event_type": "RWF", "arguments": [{"text": "suffers from", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffers", "from"], "offsets": [11, 12]}, {"text": "lack of parallel data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", "of", "parallel", "data"], "offsets": [14, 15, 16, 17]}, {"text": "in the desired style", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "desired", "style"], "offsets": [6, 7, 8, 9]}], "trigger": {"text": "suffers from", "tokens": ["suffers", "from"], "offsets": [11, 12]}}, {"event_type": "RWF", "arguments": [{"text": "increase", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["increase"], "offsets": [29]}, {"text": "monolingual stylistic data", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["monolingual", "stylistic", "data"], "offsets": [25, 26, 27]}, {"text": "expense of decreasing content relevance", "nugget_type": "WEA", "argument_type": "Fault", 
"tokens": ["expense", "of", "decreasing", "content", "relevance"], "offsets": [36, 37, 38, 39, 40]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "sentence - level information in style representations", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["sentence", "-", "level", "information", "in", "style", "representations"], "offsets": [59, 60, 61, 62, 63, 64, 65]}, {"text": "disentangle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["disentangle"], "offsets": [49]}], "trigger": {"text": "diluting", "tokens": ["diluting"], "offsets": [58]}}, {"event_type": "PUR", "arguments": [{"text": "content", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["content"], "offsets": [51]}, {"text": "style", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["style"], "offsets": [53]}, {"text": "in latent space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "latent", "space"], "offsets": [54, 55, 56]}], "trigger": {"text": "disentangle", "tokens": ["disentangle"], "offsets": [49]}}, {"event_type": "MDS", "arguments": [{"text": "desired style representation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["desired", "style", "representation"], "offsets": [69, 70, 71]}, {"text": "response content representation", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["response", "content", "representation"], "offsets": [74, 75, 76]}, {"text": "stylistic response", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["stylistic", "response"], "offsets": [81, 82]}], "trigger": {"text": "combining", "tokens": ["combining"], "offsets": [67]}}, {"event_type": "CMP", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["approach"], "offsets": [85]}, {"text": "baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baselines"], "offsets": [102]}, {"text": 
"higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [88]}, {"text": "bert - based style intensity score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bert", "-", "based", "style", "intensity", "score"], "offsets": [89, 90, 91, 92, 93, 94]}, {"text": "comparable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable"], "offsets": [96]}, {"text": "bleu scores", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu", "scores"], "offsets": [97, 98]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "significantly improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significantly", "improves"], "offsets": [111, 112]}, {"text": "maintains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["maintains"], "offsets": [116]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [107]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [110]}, {"text": "style intensity", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["style", "intensity"], "offsets": [113, 114]}], "trigger": {"text": "significantly improves", "tokens": ["significantly", "improves"], "offsets": [111, 112]}}, {"event_type": "FAC", "arguments": [{"text": "approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["approach"], "offsets": [110]}, {"text": "content relevance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["content", "relevance"], "offsets": [117, 118]}], "trigger": {"text": "maintains", "tokens": ["maintains"], "offsets": [116]}}, {"event_type": "PUR", "arguments": [{"text": "open - domain conversational responses", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["open", "-", "domain", "conversational", "responses"], "offsets": [1, 2, 3, 4, 5]}], "trigger": 
{"text": "generating", "tokens": ["generating"], "offsets": [0]}}, {"event_type": "PUR", "arguments": [{"text": "style intensity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["style", "intensity"], "offsets": [30, 31]}], "trigger": {"text": "increase", "tokens": ["increase"], "offsets": [29]}}], "document": ["generating", "open", "-", "domain", "conversational", "responses", "in", "the", "desired", "style", "usually", "suffers", "from", "the", "lack", "of", "parallel", "data", "in", "the", "style", ".", "meanwhile", ",", "using", "monolingual", "stylistic", "data", "to", "increase", "style", "intensity", "often", "leads", "to", "the", "expense", "of", "decreasing", "content", "relevance", ".", "in", "this", "paper", ",", "we", "propose", "to", "disentangle", "the", "content", "and", "style", "in", "latent", "space", "by", "diluting", "sentence", "-", "level", "information", "in", "style", "representations", ".", "combining", "the", "desired", "style", "representation", "and", "a", "response", "content", "representation", "will", "then", "obtain", "a", "stylistic", "response", ".", "our", "approach", "achieves", "a", "higher", "bert", "-", "based", "style", "intensity", "score", "and", "comparable", "bleu", "scores", ",", "compared", "with", "baselines", ".", "human", "evaluation", "results", "show", "that", "our", "approach", "significantly", "improves", "style", "intensity", "and", "maintains", "content", "relevance", "."]}, {"venue": "ACL", "title": "UniRE: A Unified Label Space for Entity Relation Extraction", "abstract": "Many joint entity relation extraction models setup two separated label spaces for the two sub-tasks (i.e., entity detection and relation classification). We argue that this setting may hinder the information interaction between entities and relations. In this work, we propose to eliminate the different treatment on the two sub-tasks\u2019 label spaces. The input of our model is a table containing all word pairs from a sentence. 
Entities and relations are represented by squares and rectangles in the table. We apply a unified classifier to predict each cell\u2019s label, which unifies the learning of two sub-tasks. For testing, an effective (yet fast) approximate decoder is proposed for finding squares and rectangles from tables. Experiments on three benchmarks (ACE04, ACE05, SciERC) show that, using only half the number of parameters, our model achieves competitive accuracy with the best extractor, and is faster.", "doc_id": "d642a8faec63660236044c9ac1faa9d3", "publication_year": 2021, "sentences": ["many joint entity relation extraction models setup two separated label spaces for the two sub - tasks ( i . e . , entity detection and relation classification ) .", "we argue that this setting may hinder the information interaction between entities and relations .", "in this work , we propose to eliminate the different treatment on the two sub - tasks \u2019 label spaces .", "the input of our model is a table containing all word pairs from a sentence .", "entities and relations are represented by squares and rectangles in the table .", "we apply a unified classifier to predict each cell \u2019 s label , which unifies the learning of two sub - tasks .", "for testing , an effective ( yet fast ) approximate decoder is proposed for finding squares and rectangles from tables .", "experiments on three benchmarks ( ace04 , ace05 , scierc ) show that , using only half the number of parameters , our model achieves competitive accuracy with the best extractor , and is faster ."], "events": [{"event_type": "RWS", "arguments": [{"text": "two sub - tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["two", "sub", "-", "tasks"], "offsets": [13, 14, 15, 16]}, {"text": "joint entity relation extraction models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["joint", "entity", "relation", "extraction", "models"], "offsets": [1, 2, 3, 4, 5]}, {"text": "two separated label spaces", 
"nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["two", "separated", "label", "spaces"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "setup", "tokens": ["setup"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "hinder", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hinder"], "offsets": [36]}], "trigger": {"text": "hinder", "tokens": ["hinder"], "offsets": [36]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [49]}, {"text": "different treatment", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["different", "treatment"], "offsets": [54, 55]}, {"text": "on the two sub - tasks \u2019 label spaces", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "two", "sub", "-", "tasks", "\u2019", "label", "spaces"], "offsets": [56, 57, 58, 59, 60, 61, 62, 63, 64]}], "trigger": {"text": "eliminate", "tokens": ["eliminate"], "offsets": [52]}}, {"event_type": "MDS", "arguments": [{"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input"], "offsets": [67]}, {"text": "table", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["table"], "offsets": [73]}, {"text": "all word pairs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["all", "word", "pairs"], "offsets": [75, 76, 77]}], "trigger": {"text": "containing", "tokens": ["containing"], "offsets": [74]}}, {"event_type": "MDS", "arguments": [{"text": "entities and relations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["entities", "and", "relations"], "offsets": [82, 83, 84]}, {"text": "squares and rectangles", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["squares", "and", "rectangles"], "offsets": [88, 89, 90]}, {"text": "in the table", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "table"], "offsets": [91, 92, 
93]}], "trigger": {"text": "represented", "tokens": ["represented"], "offsets": [86]}}, {"event_type": "MDS", "arguments": [{"text": "unified classifier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["unified", "classifier"], "offsets": [98, 99]}, {"text": "unifies", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["unifies"], "offsets": [109]}, {"text": "each cell \u2019 s label", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["each", "cell", "\u2019", "s", "label"], "offsets": [102, 103, 104, 105, 106]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [101]}}, {"event_type": "PUR", "arguments": [{"text": "learning of two sub - tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["learning", "of", "two", "sub", "-", "tasks"], "offsets": [111, 112, 113, 114, 115, 116]}], "trigger": {"text": "unifies", "tokens": ["unifies"], "offsets": [109]}}, {"event_type": "PRP", "arguments": [{"text": "approximate decoder", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["approximate", "decoder"], "offsets": [127, 128]}, {"text": "finding", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["finding"], "offsets": [132]}], "trigger": {"text": "proposed", "tokens": ["proposed"], "offsets": [130]}}, {"event_type": "PUR", "arguments": [{"text": "squares and rectangles", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["squares", "and", "rectangles"], "offsets": [133, 134, 135]}, {"text": "from tables", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "tables"], "offsets": [136, 137]}], "trigger": {"text": "finding", "tokens": ["finding"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "on three benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "benchmarks"], "offsets": [140, 141, 142]}, {"text": "using only half the number of parameters", "nugget_type": "LIM", "argument_type": "Condition", 
"tokens": ["using", "only", "half", "the", "number", "of", "parameters"], "offsets": [153, 154, 155, 156, 157, 158, 159]}, {"text": "competitive accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["competitive", "accuracy"], "offsets": [164, 165]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Object", "tokens": ["faster"], "offsets": [173]}, {"text": "with the best extractor", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "best", "extractor"], "offsets": [166, 167, 168, 169]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [163]}}], "document": ["many", "joint", "entity", "relation", "extraction", "models", "setup", "two", "separated", "label", "spaces", "for", "the", "two", "sub", "-", "tasks", "(", "i", ".", "e", ".", ",", "entity", "detection", "and", "relation", "classification", ")", ".", "we", "argue", "that", "this", "setting", "may", "hinder", "the", "information", "interaction", "between", "entities", "and", "relations", ".", "in", "this", "work", ",", "we", "propose", "to", "eliminate", "the", "different", "treatment", "on", "the", "two", "sub", "-", "tasks", "\u2019", "label", "spaces", ".", "the", "input", "of", "our", "model", "is", "a", "table", "containing", "all", "word", "pairs", "from", "a", "sentence", ".", "entities", "and", "relations", "are", "represented", "by", "squares", "and", "rectangles", "in", "the", "table", ".", "we", "apply", "a", "unified", "classifier", "to", "predict", "each", "cell", "\u2019", "s", "label", ",", "which", "unifies", "the", "learning", "of", "two", "sub", "-", "tasks", ".", "for", "testing", ",", "an", "effective", "(", "yet", "fast", ")", "approximate", "decoder", "is", "proposed", "for", "finding", "squares", "and", "rectangles", "from", "tables", ".", "experiments", "on", "three", "benchmarks", "(", "ace04", ",", "ace05", ",", "scierc", ")", "show", "that", ",", "using", "only", "half", "the", "number", "of", "parameters", ",", 
"our", "model", "achieves", "competitive", "accuracy", "with", "the", "best", "extractor", ",", "and", "is", "faster", "."]}, {"venue": "ACL", "title": "More Identifiable yet Equally Performant Transformers for Text Classification", "abstract": "Interpretability is an important aspect of the trustworthiness of a model\u2019s predictions. Transformer\u2019s predictions are widely explained by the attention weights, i.e., a probability distribution generated at its self-attention unit (head). Current empirical studies provide shreds of evidence that attention weights are not explanations by proving that they are not unique. A recent study showed theoretical justifications to this observation by proving the non-identifiability of attention weights. For a given input to a head and its output, if the attention weights generated in it are unique, we call the weights identifiable. In this work, we provide deeper theoretical analysis and empirical observations on the identifiability of attention weights. Ignored in the previous works, we find the attention weights are more identifiable than we currently perceive by uncovering the hidden role of the key vector. However, the weights are still prone to be non-unique attentions that make them unfit for interpretation. To tackle this issue, we provide a variant of the encoder layer that decouples the relationship between key and value vector and provides identifiable weights up to the desired length of the input. We prove the applicability of such variations by providing empirical justifications on varied text classification tasks. The implementations are available at https://github.com/declare-lab/identifiable-transformers.", "doc_id": "b5366cd73d89a8df1834afb4a8b84a5b", "publication_year": 2021, "sentences": ["interpretability is an important aspect of the trustworthiness of a model \u2019 s predictions .", "transformer \u2019 s predictions are widely explained by the attention weights , i . e . 
, a probability distribution generated at its self - attention unit ( head ) .", "current empirical studies provide shreds of evidence that attention weights are not explanations by proving that they are not unique .", "a recent study showed theoretical justifications to this observation by proving the non - identifiability of attention weights .", "for a given input to a head and its output , if the attention weights generated in it are unique , we call the weights identifiable .", "in this work , we provide deeper theoretical analysis and empirical observations on the identifiability of attention weights .", "ignored in the previous works , we find the attention weights are more identifiable than we currently perceive by uncovering the hidden role of the key vector .", "however , the weights are still prone to be non - unique attentions that make them unfit for interpretation .", "to tackle this issue , we provide a variant of the encoder layer that decouples the relationship between key and value vector and provides identifiable weights up to the desired length of the input .", "we prove the applicability of such variations by providing empirical justifications on varied text classification tasks .", "the implementations are available at https : / / github . 
com / declare - lab / identifiable - transformers ."], "events": [{"event_type": "ITT", "arguments": [{"text": "interpretability", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["interpretability"], "offsets": [0]}], "trigger": {"text": "aspect", "tokens": ["aspect"], "offsets": [4]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [117]}, {"text": "deeper theoretical analysis", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["deeper", "theoretical", "analysis"], "offsets": [119, 120, 121]}, {"text": "identifiability of attention weights", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["identifiability", "of", "attention", "weights"], "offsets": [127, 128, 129, 130]}, {"text": "empirical observations", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["empirical", "observations"], "offsets": [123, 124]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [118]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [185]}, {"text": "variant of the encoder layer", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["variant", "of", "the", "encoder", "layer"], "offsets": [188, 189, 190, 191, 192]}, {"text": "decouples", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["decouples"], "offsets": [194]}, {"text": "provides", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provides"], "offsets": [203]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [186]}}, {"event_type": "PUR", "arguments": [{"text": "relationship", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["relationship"], "offsets": [196]}, {"text": "between key and value vector", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "key", "and", "value", "vector"], "offsets": [197, 198, 199, 200, 201]}], 
"trigger": {"text": "decouples", "tokens": ["decouples"], "offsets": [194]}}, {"event_type": "PUR", "arguments": [{"text": "identifiable weights", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["identifiable", "weights"], "offsets": [204, 205]}, {"text": "up to the desired length of the input", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["up", "to", "the", "desired", "length", "of", "the", "input"], "offsets": [206, 207, 208, 209, 210, 211, 212, 213]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [203]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [215]}, {"text": "empirical justifications", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["empirical", "justifications"], "offsets": [224, 225]}, {"text": "prove", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["prove"], "offsets": [216]}, {"text": "varied text classification tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["varied", "text", "classification", "tasks"], "offsets": [227, 228, 229, 230]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [223]}}, {"event_type": "PUR", "arguments": [{"text": "applicability of such variations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["applicability", "of", "such", "variations"], "offsets": [218, 219, 220, 221]}], "trigger": {"text": "prove", "tokens": ["prove"], "offsets": [216]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [138]}, {"text": "more identifiable", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "identifiable"], "offsets": [144, 145]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [139]}}, {"event_type": "CMP", "arguments": [{"text": "attention weights", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": 
["attention", "weights"], "offsets": [141, 142]}, {"text": "more identifiable", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "identifiable"], "offsets": [144, 145]}, {"text": "by uncovering the hidden role of the key vector", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "uncovering", "the", "hidden", "role", "of", "the", "key", "vector"], "offsets": [150, 151, 152, 153, 154, 155, 156, 157, 158]}], "trigger": {"text": "more identifiable", "tokens": ["more", "identifiable"], "offsets": [144, 145]}}], "document": ["interpretability", "is", "an", "important", "aspect", "of", "the", "trustworthiness", "of", "a", "model", "\u2019", "s", "predictions", ".", "transformer", "\u2019", "s", "predictions", "are", "widely", "explained", "by", "the", "attention", "weights", ",", "i", ".", "e", ".", ",", "a", "probability", "distribution", "generated", "at", "its", "self", "-", "attention", "unit", "(", "head", ")", ".", "current", "empirical", "studies", "provide", "shreds", "of", "evidence", "that", "attention", "weights", "are", "not", "explanations", "by", "proving", "that", "they", "are", "not", "unique", ".", "a", "recent", "study", "showed", "theoretical", "justifications", "to", "this", "observation", "by", "proving", "the", "non", "-", "identifiability", "of", "attention", "weights", ".", "for", "a", "given", "input", "to", "a", "head", "and", "its", "output", ",", "if", "the", "attention", "weights", "generated", "in", "it", "are", "unique", ",", "we", "call", "the", "weights", "identifiable", ".", "in", "this", "work", ",", "we", "provide", "deeper", "theoretical", "analysis", "and", "empirical", "observations", "on", "the", "identifiability", "of", "attention", "weights", ".", "ignored", "in", "the", "previous", "works", ",", "we", "find", "the", "attention", "weights", "are", "more", "identifiable", "than", "we", "currently", "perceive", "by", "uncovering", "the", "hidden", "role", "of", "the", "key", "vector", ".", 
"however", ",", "the", "weights", "are", "still", "prone", "to", "be", "non", "-", "unique", "attentions", "that", "make", "them", "unfit", "for", "interpretation", ".", "to", "tackle", "this", "issue", ",", "we", "provide", "a", "variant", "of", "the", "encoder", "layer", "that", "decouples", "the", "relationship", "between", "key", "and", "value", "vector", "and", "provides", "identifiable", "weights", "up", "to", "the", "desired", "length", "of", "the", "input", ".", "we", "prove", "the", "applicability", "of", "such", "variations", "by", "providing", "empirical", "justifications", "on", "varied", "text", "classification", "tasks", ".", "the", "implementations", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "declare", "-", "lab", "/", "identifiable", "-", "transformers", "."]}, {"venue": "ACL", "title": "Learning from Dialogue after Deployment: Feed Yourself, Chatbot!", "abstract": "The majority of conversations a dialogue agent sees over its lifetime occur after it has already been trained and deployed, leaving a vast store of potential training signal untapped. In this work, we propose the self-feeding chatbot, a dialogue agent with the ability to extract new training examples from the conversations it participates in. As our agent engages in conversation, it also estimates user satisfaction in its responses. When the conversation appears to be going well, the user\u2019s responses become new training examples to imitate. When the agent believes it has made a mistake, it asks for feedback; learning to predict the feedback that will be given improves the chatbot\u2019s dialogue abilities further. 
On the PersonaChat chit-chat dataset with over 131k training examples, we find that learning from dialogue with a self-feeding chatbot significantly improves performance, regardless of the amount of traditional supervision.", "doc_id": "55e7974684d55ba83fcc4efb8d5aad0d", "publication_year": 2019, "sentences": ["the majority of conversations a dialogue agent sees over its lifetime occur after it has already been trained and deployed , leaving a vast store of potential training signal untapped .", "in this work , we propose the self - feeding chatbot , a dialogue agent with the ability to extract new training examples from the conversations it participates in .", "as our agent engages in conversation , it also estimates user satisfaction in its responses .", "when the conversation appears to be going well , the user \u2019 s responses become new training examples to imitate .", "when the agent believes it has made a mistake , it asks for feedback ; learning to predict the feedback that will be given improves the chatbot \u2019 s dialogue abilities further .", "on the personachat chit - chat dataset with over 131k training examples , we find that learning from dialogue with a self - feeding chatbot significantly improves performance , regardless of the amount of traditional supervision ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dialogue agent", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialogue", "agent"], "offsets": [5, 6]}], "trigger": {"text": "sees", "tokens": ["sees"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "dialogue agent", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["dialogue", "agent"], "offsets": [5, 6]}, {"text": "potential training signal untapped", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["potential", "training", "signal", "untapped"], "offsets": [26, 27, 28, 29]}], "trigger": {"text": "leaving", "tokens": ["leaving"], "offsets": [21]}}, {"event_type": "PRP", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [35]}, {"text": "self - feeding chatbot", "nugget_type": "APP", "argument_type": "Content", "tokens": ["self", "-", "feeding", "chatbot"], "offsets": [38, 39, 40, 41]}, {"text": "extract", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["extract"], "offsets": [50]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [36]}}, {"event_type": "PUR", "arguments": [{"text": "new training examples", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["new", "training", "examples"], "offsets": [51, 52, 53]}, {"text": "from the conversations it participates in", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "conversations", "it", "participates", "in"], "offsets": [54, 55, 56, 57, 58, 59]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "in its responses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "its", "responses"], "offsets": [73, 74, 75]}, {"text": "user satisfaction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["user", "satisfaction"], "offsets": [71, 72]}], "trigger": {"text": "estimates", "tokens": ["estimates"], "offsets": [70]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [144]}, {"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [157]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [145]}}, {"event_type": "CMP", "arguments": [{"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [156]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [157]}, {"text": "performance", "nugget_type": 
"TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [158]}, {"text": "regardless of the amount of traditional supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["regardless", "of", "the", "amount", "of", "traditional", "supervision"], "offsets": [160, 161, 162, 163, 164, 165, 166]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [157]}}], "document": ["the", "majority", "of", "conversations", "a", "dialogue", "agent", "sees", "over", "its", "lifetime", "occur", "after", "it", "has", "already", "been", "trained", "and", "deployed", ",", "leaving", "a", "vast", "store", "of", "potential", "training", "signal", "untapped", ".", "in", "this", "work", ",", "we", "propose", "the", "self", "-", "feeding", "chatbot", ",", "a", "dialogue", "agent", "with", "the", "ability", "to", "extract", "new", "training", "examples", "from", "the", "conversations", "it", "participates", "in", ".", "as", "our", "agent", "engages", "in", "conversation", ",", "it", "also", "estimates", "user", "satisfaction", "in", "its", "responses", ".", "when", "the", "conversation", "appears", "to", "be", "going", "well", ",", "the", "user", "\u2019", "s", "responses", "become", "new", "training", "examples", "to", "imitate", ".", "when", "the", "agent", "believes", "it", "has", "made", "a", "mistake", ",", "it", "asks", "for", "feedback", ";", "learning", "to", "predict", "the", "feedback", "that", "will", "be", "given", "improves", "the", "chatbot", "\u2019", "s", "dialogue", "abilities", "further", ".", "on", "the", "personachat", "chit", "-", "chat", "dataset", "with", "over", "131k", "training", "examples", ",", "we", "find", "that", "learning", "from", "dialogue", "with", "a", "self", "-", "feeding", "chatbot", "significantly", "improves", "performance", ",", "regardless", "of", "the", "amount", "of", "traditional", "supervision", "."]}, {"venue": "ACL", "title": "Neural Retrieval for Question Answering with Cross-Attention 
Supervised Data Augmentation", "abstract": "Early fusion models with cross-attention have shown better-than-human performance on some question answer benchmarks, while it is a poor fit for retrieval since it prevents pre-computation of the answer representations. We present a supervised data mining method using an accurate early fusion model to improve the training of an efficient late fusion retrieval model. We first train an accurate classification model with cross-attention between questions and answers. The cross-attention model is then used to annotate additional passages in order to generate weighted training examples for a neural retrieval model. The resulting retrieval model with additional data significantly outperforms retrieval models directly trained with gold annotations on Precision at N (P@N) and Mean Reciprocal Rank (MRR).", "doc_id": "90071409616afbd800f170d18f909a66", "publication_year": 2021, "sentences": ["early fusion models with cross - attention have shown better - than - human performance on some question answer benchmarks , while it is a poor fit for retrieval since it prevents pre - computation of the answer representations .", "we present a supervised data mining method using an accurate early fusion model to improve the training of an efficient late fusion retrieval model .", "we first train an accurate classification model with cross - attention between questions and answers .", "the cross - attention model is then used to annotate additional passages in order to generate weighted training examples for a neural retrieval model .", "the resulting retrieval model with additional data significantly outperforms retrieval models directly trained with gold annotations on precision at n ( p @ n ) and mean reciprocal rank ( mrr ) ."], "events": [{"event_type": "ITT", "arguments": [{"text": "early fusion models with cross - attention", "nugget_type": "APP", "argument_type": "Target", "tokens": ["early", "fusion", "models", "with", "cross", "-", 
"attention"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "shown", "tokens": ["shown"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "early fusion models with cross - attention", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["early", "fusion", "models", "with", "cross", "-", "attention"], "offsets": [0, 1, 2, 3, 4, 5, 6]}, {"text": "retrieval", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["retrieval"], "offsets": [28]}], "trigger": {"text": "poor fit", "tokens": ["poor", "fit"], "offsets": [25, 26]}}, {"event_type": "RWF", "arguments": [{"text": "early fusion models with cross - attention", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["early", "fusion", "models", "with", "cross", "-", "attention"], "offsets": [0, 1, 2, 3, 4, 5, 6]}, {"text": "prevents", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["prevents"], "offsets": [31]}], "trigger": {"text": "prevents", "tokens": ["prevents"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [40]}, {"text": "supervised data mining method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["supervised", "data", "mining", "method"], "offsets": [43, 44, 45, 46]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [41]}}, {"event_type": "MDS", "arguments": [{"text": "accurate early fusion model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["accurate", "early", "fusion", "model"], "offsets": [49, 50, 51, 52]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [54]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [47]}}, {"event_type": "PUR", "arguments": [{"text": "training of an efficient late fusion retrieval model", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["training", "of", "an", "efficient", "late", 
"fusion", "retrieval", "model"], "offsets": [56, 57, 58, 59, 60, 61, 62, 63]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [65]}, {"text": "accurate classification model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["accurate", "classification", "model"], "offsets": [69, 70, 71]}, {"text": "with cross - attention between questions and answers", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "cross", "-", "attention", "between", "questions", "and", "answers"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [67]}}, {"event_type": "MDS", "arguments": [{"text": "cross - attention model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["cross", "-", "attention", "model"], "offsets": [82, 83, 84, 85]}, {"text": "additional passages", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["additional", "passages"], "offsets": [91, 92]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [96]}, {"text": "neural retrieval model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["neural", "retrieval", "model"], "offsets": [102, 103, 104]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "weighted training examples", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["weighted", "training", "examples"], "offsets": [97, 98, 99]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [96]}}, {"event_type": "CMP", "arguments": [{"text": "resulting retrieval model with additional data", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["resulting", "retrieval", "model", "with", "additional", "data"], "offsets": [107, 108, 109, 110, 
111, 112]}, {"text": "significantly outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "outperforms"], "offsets": [113, 114]}, {"text": "retrieval models directly trained with gold annotations", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["retrieval", "models", "directly", "trained", "with", "gold", "annotations"], "offsets": [115, 116, 117, 118, 119, 120, 121]}, {"text": "precision", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["precision"], "offsets": [123]}], "trigger": {"text": "significantly outperforms", "tokens": ["significantly", "outperforms"], "offsets": [113, 114]}}], "document": ["early", "fusion", "models", "with", "cross", "-", "attention", "have", "shown", "better", "-", "than", "-", "human", "performance", "on", "some", "question", "answer", "benchmarks", ",", "while", "it", "is", "a", "poor", "fit", "for", "retrieval", "since", "it", "prevents", "pre", "-", "computation", "of", "the", "answer", "representations", ".", "we", "present", "a", "supervised", "data", "mining", "method", "using", "an", "accurate", "early", "fusion", "model", "to", "improve", "the", "training", "of", "an", "efficient", "late", "fusion", "retrieval", "model", ".", "we", "first", "train", "an", "accurate", "classification", "model", "with", "cross", "-", "attention", "between", "questions", "and", "answers", ".", "the", "cross", "-", "attention", "model", "is", "then", "used", "to", "annotate", "additional", "passages", "in", "order", "to", "generate", "weighted", "training", "examples", "for", "a", "neural", "retrieval", "model", ".", "the", "resulting", "retrieval", "model", "with", "additional", "data", "significantly", "outperforms", "retrieval", "models", "directly", "trained", "with", "gold", "annotations", "on", "precision", "at", "n", "(", "p", "@", "n", ")", "and", "mean", "reciprocal", "rank", "(", "mrr", ")", "."]}, {"venue": "ACL", "title": "Discovering Dialogue Slots with Weak Supervision", 
"abstract": "Task-oriented dialogue systems typically require manual annotation of dialogue slots in training data, which is costly to obtain. We propose a method that eliminates this requirement: We use weak supervision from existing linguistic annotation models to identify potential slot candidates, then automatically identify domain-relevant slots by using clustering algorithms. Furthermore, we use the resulting slot annotation to train a neural-network-based tagger that is able to perform slot tagging with no human intervention. This tagger is trained solely on the outputs of our method and thus does not rely on any labeled data. Our model demonstrates state-of-the-art performance in slot tagging without labeled training data on four different dialogue domains. Moreover, we find that slot annotations discovered by our model significantly improve the performance of an end-to-end dialogue response generation model, compared to using no slot annotation at all.", "doc_id": "46241c85921845cf3098c0d2226b9958", "publication_year": 2021, "sentences": ["task - oriented dialogue systems typically require manual annotation of dialogue slots in training data , which is costly to obtain .", "we propose a method that eliminates this requirement : we use weak supervision from existing linguistic annotation models to identify potential slot candidates , then automatically identify domain - relevant slots by using clustering algorithms .", "furthermore , we use the resulting slot annotation to train a neural - network - based tagger that is able to perform slot tagging with no human intervention .", "this tagger is trained solely on the outputs of our method and thus does not rely on any labeled data .", "our model demonstrates state - of - the - art performance in slot tagging without labeled training data on four different dialogue domains .", "moreover , we find that slot annotations discovered by our model significantly improve the performance of an end - to - end dialogue 
response generation model , compared to using no slot annotation at all ."], "events": [{"event_type": "RWF", "arguments": [{"text": "costly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["costly"], "offsets": [18]}, {"text": "manual annotation of dialogue slots", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["manual", "annotation", "of", "dialogue", "slots"], "offsets": [7, 8, 9, 10, 11]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [20]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [22]}, {"text": "method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["method"], "offsets": [25]}, {"text": "eliminates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["eliminates"], "offsets": [27]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [23]}}, {"event_type": "PUR", "arguments": [{"text": "this requirement", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["this", "requirement"], "offsets": [28, 29]}], "trigger": {"text": "eliminates", "tokens": ["eliminates"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "potential slot candidates", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["potential", "slot", "candidates"], "offsets": [42, 43, 44]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [41]}}, {"event_type": "WKS", "arguments": [{"text": "clustering algorithms", "nugget_type": "APP", "argument_type": "Content", "tokens": ["clustering", "algorithms"], "offsets": [55, 56]}, {"text": "automatically identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["automatically", "identify"], "offsets": [47, 48]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [54]}}, {"event_type": "PUR", "arguments": [{"text": "domain - relevant slots", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["domain", "-", 
"relevant", "slots"], "offsets": [49, 50, 51, 52]}], "trigger": {"text": "automatically identify", "tokens": ["automatically", "identify"], "offsets": [47, 48]}}, {"event_type": "PUR", "arguments": [{"text": "neural - network - based tagger", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["neural", "-", "network", "-", "based", "tagger"], "offsets": [69, 70, 71, 72, 73, 74]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [67]}}, {"event_type": "FAC", "arguments": [{"text": "without labeled training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "labeled", "training", "data"], "offsets": [122, 123, 124, 125]}, {"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [109]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118]}, {"text": "slot tagging", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["slot", "tagging"], "offsets": [120, 121]}, {"text": "on four different dialogue domains", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "four", "different", "dialogue", "domains"], "offsets": [126, 127, 128, 129, 130]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [110]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [134]}, {"text": "significantly improve", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significantly", "improve"], "offsets": [143, 144]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [135]}}, {"event_type": "CMP", "arguments": [{"text": "performance of an end - to - end dialogue response generation model", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance", "of", "an", "end", "-", "to", 
"-", "end", "dialogue", "response", "generation", "model"], "offsets": [146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157]}, {"text": "significantly improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "improve"], "offsets": [143, 144]}, {"text": "using no slot annotation at all", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["using", "no", "slot", "annotation", "at", "all"], "offsets": [161, 162, 163, 164, 165, 166]}], "trigger": {"text": "significantly improve", "tokens": ["significantly", "improve"], "offsets": [143, 144]}}, {"event_type": "MDS", "arguments": [{"text": "weak supervision", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["weak", "supervision"], "offsets": [33, 34]}, {"text": "existing linguistic annotation models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["existing", "linguistic", "annotation", "models"], "offsets": [36, 37, 38, 39]}, {"text": "identify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["identify"], "offsets": [41]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [32]}}, {"event_type": "MDS", "arguments": [{"text": "resulting slot annotation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["resulting", "slot", "annotation"], "offsets": [63, 64, 65]}, {"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [67]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [61]}}], "document": ["task", "-", "oriented", "dialogue", "systems", "typically", "require", "manual", "annotation", "of", "dialogue", "slots", "in", "training", "data", ",", "which", "is", "costly", "to", "obtain", ".", "we", "propose", "a", "method", "that", "eliminates", "this", "requirement", ":", "we", "use", "weak", "supervision", "from", "existing", "linguistic", "annotation", "models", "to", "identify", "potential", "slot", "candidates", ",", "then", "automatically", 
"identify", "domain", "-", "relevant", "slots", "by", "using", "clustering", "algorithms", ".", "furthermore", ",", "we", "use", "the", "resulting", "slot", "annotation", "to", "train", "a", "neural", "-", "network", "-", "based", "tagger", "that", "is", "able", "to", "perform", "slot", "tagging", "with", "no", "human", "intervention", ".", "this", "tagger", "is", "trained", "solely", "on", "the", "outputs", "of", "our", "method", "and", "thus", "does", "not", "rely", "on", "any", "labeled", "data", ".", "our", "model", "demonstrates", "state", "-", "of", "-", "the", "-", "art", "performance", "in", "slot", "tagging", "without", "labeled", "training", "data", "on", "four", "different", "dialogue", "domains", ".", "moreover", ",", "we", "find", "that", "slot", "annotations", "discovered", "by", "our", "model", "significantly", "improve", "the", "performance", "of", "an", "end", "-", "to", "-", "end", "dialogue", "response", "generation", "model", ",", "compared", "to", "using", "no", "slot", "annotation", "at", "all", "."]}, {"venue": "ACL", "title": "SummN: A Multi-Stage Summarization Framework for Long Input Dialogues and Documents", "abstract": "Text summarization helps readers capture salient information from documents, news, interviews, and meetings. However, most state-of-the-art pretrained language models (LM) are unable to efficiently process long text for many summarization tasks. In this paper, we propose SummN, a simple, flexible, and effective multi-stage framework for input texts that are longer than the maximum context length of typical pretrained LMs. SummN first splits the data samples and generates a coarse summary in multiple stages and then produces the final fine-grained summary based on it. Our framework can process input text of arbitrary length by adjusting the number of stages while keeping the LM input size fixed. 
Moreover, it can deal with both single-source documents and dialogues, and it can be used on top of different backbone abstractive summarization models. To the best of our knowledge, SummN is the first multi-stage split-then-summarize framework for long input summarization. Our experiments demonstrate that SummN outperforms previous state-of-the-art methods by improving ROUGE scores on three long meeting summarization datasets AMI, ICSI, and QMSum, two long TV series datasets from SummScreen, and a long document summarization dataset GovReport. Our data and code are available at https://github.com/psunlpgroup/Summ-N.", "doc_id": "c15b9cdde332587189ec50d769ca6da2", "publication_year": 2022, "sentences": ["text summarization helps readers capture salient information from documents , news , interviews , and meetings .", "however , most state - of - the - art pretrained language models ( lm ) are unable to efficiently process long text for many summarization tasks .", "in this paper , we propose summn , a simple , flexible , and effective multi - stage framework for input texts that are longer than the maximum context length of typical pretrained lms .", "summn first splits the data samples and generates a coarse summary in multiple stages and then produces the final fine - grained summary based on it .", "our framework can process input text of arbitrary length by adjusting the number of stages while keeping the lm input size fixed .", "moreover , it can deal with both single - source documents and dialogues , and it can be used on top of different backbone abstractive summarization models .", "to the best of our knowledge , summn is the first multi - stage split - then - summarize framework for long input summarization .", "our experiments demonstrate that summn outperforms previous state - of - the - art methods by improving rouge scores on three long meeting summarization datasets ami , icsi , and qmsum , two long tv series datasets from summscreen , and a long 
document summarization dataset govreport .", "our data and code are available at https : / / github . com / psunlpgroup / summ - n ."], "events": [{"event_type": "ITT", "arguments": [{"text": "text summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "summarization"], "offsets": [0, 1]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "most state - of - the - art pretrained language models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "state", "-", "of", "-", "the", "-", "art", "pretrained", "language", "models"], "offsets": [19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29]}], "trigger": {"text": "unable to efficiently process", "tokens": ["unable", "to", "efficiently", "process"], "offsets": [34, 35, 36, 37]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [49]}, {"text": "simple , flexible , and effective multi - stage framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", ",", "flexible", ",", "and", "effective", "multi", "-", "stage", "framework"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63]}, {"text": "input", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["input"], "offsets": [65]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [50]}}, {"event_type": "PUR", "arguments": [{"text": "texts that are longer than the maximum context length of typical pretrained lms", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["texts", "that", "are", "longer", "than", "the", "maximum", "context", "length", "of", "typical", "pretrained", "language", "models"], "offsets": [66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 28, 29]}], "trigger": {"text": "input", "tokens": ["input"], "offsets": [65]}}, {"event_type": "MDS", "arguments": [{"text": "data samples", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["data", "samples"], "offsets": [84, 85]}], "trigger": {"text": "splits", "tokens": ["splits"], "offsets": [82]}}, {"event_type": "MDS", "arguments": [{"text": "coarse summary", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coarse", "summary"], "offsets": [89, 90]}, {"text": "in multiple stages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "multiple", "stages"], "offsets": [91, 92, 93]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "coarse summary", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coarse", "summary"], "offsets": [89, 90]}, {"text": "final fine - grained summary", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["final", "fine", "-", "grained", "summary"], "offsets": [98, 99, 100, 101, 102]}], "trigger": {"text": "produces", "tokens": ["produces"], "offsets": [96]}}, {"event_type": "FAC", "arguments": [{"text": "simple , flexible , and effective multi - stage framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simple", ",", "flexible", ",", "and", "effective", "multi", "-", "stage", "framework"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63]}, {"text": "input text of arbitrary length", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["input", "text", "of", "arbitrary", "length"], "offsets": [111, 112, 113, 114, 115]}, {"text": "adjusting the number of stages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["adjusting", "the", "number", "of", "stages"], "offsets": [117, 118, 119, 120, 121]}, {"text": "while keeping the lm input size fixed", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "keeping", "the", "lm", "input", "size", "fixed"], "offsets": [122, 123, 124, 125, 126, 127, 128]}], "trigger": {"text": "process", "tokens": ["process"], "offsets": [110]}}, {"event_type": "FAC", "arguments": 
[{"text": "simple , flexible , and effective multi - stage framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simple", ",", "flexible", ",", "and", "effective", "multi", "-", "stage", "framework"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63]}, {"text": "single - source documents", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["single", "-", "source", "documents"], "offsets": [137, 138, 139, 140]}, {"text": "single - source dialogues", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["single", "-", "source", "dialogues"], "offsets": [137, 138, 139, 142]}], "trigger": {"text": "deal with", "tokens": ["deal", "with"], "offsets": [134, 135]}}, {"event_type": "FAC", "arguments": [{"text": "simple , flexible , and effective multi - stage framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["simple", ",", "flexible", ",", "and", "effective", "multi", "-", "stage", "framework"], "offsets": [54, 55, 56, 57, 58, 59, 60, 61, 62, 63]}, {"text": "on top of different backbone abstractive summarization models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "top", "of", "different", "backbone", "abstractive", "summarization", "models"], "offsets": [149, 150, 151, 152, 153, 154, 155, 156]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [148]}}, {"event_type": "CMP", "arguments": [{"text": "summn", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["summn"], "offsets": [187]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [188]}, {"text": "previous state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [189, 190, 191, 192, 193, 194, 195, 196, 197]}, {"text": "improving", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improving"], "offsets": [199]}, {"text": "rouge scores", 
"nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["rouge", "scores"], "offsets": [200, 201]}, {"text": "three long meeting summarization datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "long", "meeting", "summarization", "datasets"], "offsets": [203, 204, 205, 206, 207]}, {"text": "two long tv series datasets from summscreen", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "long", "tv", "series", "datasets", "from", "summscreen"], "offsets": [215, 216, 217, 218, 219, 220, 221]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [188]}}], "document": ["text", "summarization", "helps", "readers", "capture", "salient", "information", "from", "documents", ",", "news", ",", "interviews", ",", "and", "meetings", ".", "however", ",", "most", "state", "-", "of", "-", "the", "-", "art", "pretrained", "language", "models", "(", "lm", ")", "are", "unable", "to", "efficiently", "process", "long", "text", "for", "many", "summarization", "tasks", ".", "in", "this", "paper", ",", "we", "propose", "summn", ",", "a", "simple", ",", "flexible", ",", "and", "effective", "multi", "-", "stage", "framework", "for", "input", "texts", "that", "are", "longer", "than", "the", "maximum", "context", "length", "of", "typical", "pretrained", "lms", ".", "summn", "first", "splits", "the", "data", "samples", "and", "generates", "a", "coarse", "summary", "in", "multiple", "stages", "and", "then", "produces", "the", "final", "fine", "-", "grained", "summary", "based", "on", "it", ".", "our", "framework", "can", "process", "input", "text", "of", "arbitrary", "length", "by", "adjusting", "the", "number", "of", "stages", "while", "keeping", "the", "lm", "input", "size", "fixed", ".", "moreover", ",", "it", "can", "deal", "with", "both", "single", "-", "source", "documents", "and", "dialogues", ",", "and", "it", "can", "be", "used", "on", "top", "of", "different", "backbone", "abstractive", "summarization", 
"models", ".", "to", "the", "best", "of", "our", "knowledge", ",", "summn", "is", "the", "first", "multi", "-", "stage", "split", "-", "then", "-", "summarize", "framework", "for", "long", "input", "summarization", ".", "our", "experiments", "demonstrate", "that", "summn", "outperforms", "previous", "state", "-", "of", "-", "the", "-", "art", "methods", "by", "improving", "rouge", "scores", "on", "three", "long", "meeting", "summarization", "datasets", "ami", ",", "icsi", ",", "and", "qmsum", ",", "two", "long", "tv", "series", "datasets", "from", "summscreen", ",", "and", "a", "long", "document", "summarization", "dataset", "govreport", ".", "our", "data", "and", "code", "are", "available", "at", "https", ":", "/", "/", "github", ".", "com", "/", "psunlpgroup", "/", "summ", "-", "n", "."]}, {"venue": "ACL", "title": "Cross-Lingual Semantic Role Labeling with High-Quality Translated Training Corpus", "abstract": "Many efforts of research are devoted to semantic role labeling (SRL) which is crucial for natural language understanding. Supervised approaches have achieved impressing performances when large-scale corpora are available for resource-rich languages such as English. While for the low-resource languages with no annotated SRL dataset, it is still challenging to obtain competitive performances. Cross-lingual SRL is one promising way to address the problem, which has achieved great advances with the help of model transferring and annotation projection. In this paper, we propose a novel alternative based on corpus translation, constructing high-quality training datasets for the target languages from the source gold-standard SRL annotations. 
Experimental results on Universal Proposition Bank show that the translation-based method is highly effective, and the automatic pseudo datasets can improve the target-language SRL performances significantly.", "doc_id": "52165b01f232124358edd885fae5f71f", "publication_year": 2020, "sentences": ["many efforts of research are devoted to semantic role labeling ( srl ) which is crucial for natural language understanding .", "supervised approaches have achieved impressing performances when large - scale corpora are available for resource - rich languages such as english .", "while for the low - resource languages with no annotated srl dataset , it is still challenging to obtain competitive performances .", "cross - lingual srl is one promising way to address the problem , which has achieved great advances with the help of model transferring and annotation projection .", "in this paper , we propose a novel alternative based on corpus translation , constructing high - quality training datasets for the target languages from the source gold - standard srl annotations .", "experimental results on universal proposition bank show that the translation - based method is highly effective , and the automatic pseudo datasets can improve the target - language srl performances significantly ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "understanding"], "offsets": [17, 18, 19]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [15]}}, {"event_type": "RWF", "arguments": [{"text": "competitive performances", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["competitive", "performances"], "offsets": [62, 63]}, {"text": "challenging", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["challenging"], "offsets": [59]}, {"text": "for the low - resource languages with no annotated srl dataset", "nugget_type": "LIM", 
"argument_type": "Condition", "tokens": ["for", "the", "low", "-", "resource", "languages", "with", "no", "annotated", "srl", "dataset"], "offsets": [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [61]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [97]}, {"text": "alternative based on corpus translation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["alternative", "based", "on", "corpus", "translation"], "offsets": [101, 102, 103, 104, 105]}, {"text": "constructing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["constructing"], "offsets": [107]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [98]}}, {"event_type": "FIN", "arguments": [{"text": "highly effective", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["highly", "effective"], "offsets": [140, 141]}, {"text": "improve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improve"], "offsets": [149]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "translation - based method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["translation", "-", "based", "method"], "offsets": [135, 136, 137, 138]}], "trigger": {"text": "highly effective", "tokens": ["highly", "effective"], "offsets": [140, 141]}}, {"event_type": "FAC", "arguments": [{"text": "automatic pseudo datasets", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["automatic", "pseudo", "datasets"], "offsets": [145, 146, 147]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [156]}, {"text": "target - language srl performances", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["target", "-", "language", "semantic", "role", "labeling", "performances"], "offsets": 
[151, 152, 153, 7, 8, 9, 155]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [149]}}, {"event_type": "PUR", "arguments": [{"text": "from the source gold - standard srl annotations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "source", "gold", "-", "standard", "srl", "annotations"], "offsets": [117, 118, 119, 120, 121, 122, 123, 124]}, {"text": "high - quality training datasets for the target languages", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["high", "-", "quality", "training", "datasets", "for", "the", "target", "languages"], "offsets": [108, 109, 110, 111, 112, 113, 114, 115, 116]}], "trigger": {"text": "constructing", "tokens": ["constructing"], "offsets": [107]}}], "document": ["many", "efforts", "of", "research", "are", "devoted", "to", "semantic", "role", "labeling", "(", "srl", ")", "which", "is", "crucial", "for", "natural", "language", "understanding", ".", "supervised", "approaches", "have", "achieved", "impressing", "performances", "when", "large", "-", "scale", "corpora", "are", "available", "for", "resource", "-", "rich", "languages", "such", "as", "english", ".", "while", "for", "the", "low", "-", "resource", "languages", "with", "no", "annotated", "srl", "dataset", ",", "it", "is", "still", "challenging", "to", "obtain", "competitive", "performances", ".", "cross", "-", "lingual", "srl", "is", "one", "promising", "way", "to", "address", "the", "problem", ",", "which", "has", "achieved", "great", "advances", "with", "the", "help", "of", "model", "transferring", "and", "annotation", "projection", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "alternative", "based", "on", "corpus", "translation", ",", "constructing", "high", "-", "quality", "training", "datasets", "for", "the", "target", "languages", "from", "the", "source", "gold", "-", "standard", "srl", "annotations", ".", "experimental", "results", "on", "universal", "proposition", "bank", "show", "that", 
"the", "translation", "-", "based", "method", "is", "highly", "effective", ",", "and", "the", "automatic", "pseudo", "datasets", "can", "improve", "the", "target", "-", "language", "srl", "performances", "significantly", "."]}, {"venue": "ACL", "title": "Multiscale Collaborative Deep Models for Neural Machine Translation", "abstract": "Recent evidence reveals that Neural Machine Translation (NMT) models with deeper neural networks can be more effective but are difficult to train. In this paper, we present a MultiScale Collaborative (MSC) framework to ease the training of NMT models that are substantially deeper than those used previously. We explicitly boost the gradient back-propagation from top to bottom levels by introducing a block-scale collaboration mechanism into deep NMT models. Then, instead of forcing the whole encoder stack directly learns a desired representation, we let each encoder block learns a fine-grained representation and enhance it by encoding spatial dependencies using a context-scale collaboration. We provide empirical evidence showing that the MSC nets are easy to optimize and can obtain improvements of translation quality from considerably increased depth. On IWSLT translation tasks with three translation directions, our extremely deep models (with 72-layer encoders) surpass strong baselines by +2.2~+3.1 BLEU points. In addition, our deep MSC achieves a BLEU score of 30.56 on WMT14 English-to-German task that significantly outperforms state-of-the-art deep NMT models. 
We have included the source code in supplementary materials.", "doc_id": "546a625d25e4a4fe5ca6216733cc2b88", "publication_year": 2020, "sentences": ["recent evidence reveals that neural machine translation ( nmt ) models with deeper neural networks can be more effective but are difficult to train .", "in this paper , we present a multiscale collaborative ( msc ) framework to ease the training of nmt models that are substantially deeper than those used previously .", "we explicitly boost the gradient back - propagation from top to bottom levels by introducing a block - scale collaboration mechanism into deep nmt models .", "then , instead of forcing the whole encoder stack directly learns a desired representation , we let each encoder block learns a fine - grained representation and enhance it by encoding spatial dependencies using a context - scale collaboration .", "we provide empirical evidence showing that the msc nets are easy to optimize and can obtain improvements of translation quality from considerably increased depth .", "on iwslt translation tasks with three translation directions , our extremely deep models ( with 72 - layer encoders ) surpass strong baselines by + 2 . 2 ~ + 3 . 1 bleu points .", "in addition , our deep msc achieves a bleu score of 30 . 
56 on wmt14 english - to - german task that significantly outperforms state - of - the - art deep nmt models .", "we have included the source code in supplementary materials ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "machine", "translation"], "offsets": [4, 5, 6]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [18]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [29]}, {"text": "multiscale collaborative", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multiscale", "collaborative"], "offsets": [32, 33]}, {"text": "ease", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ease"], "offsets": [39]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [30]}}, {"event_type": "PUR", "arguments": [{"text": "training of nmt models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["training", "of", "neural", "machine", "translation", "models"], "offsets": [41, 42, 4, 5, 6, 44]}], "trigger": {"text": "ease", "tokens": ["ease"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [54]}, {"text": "gradient back - propagation from top to bottom levels", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["gradient", "back", "-", "propagation", "from", "top", "to", "bottom", "levels"], "offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66]}], "trigger": {"text": "explicitly boost", "tokens": ["explicitly", "boost"], "offsets": [55, 56]}}, {"event_type": "MDS", "arguments": [{"text": "deep nmt models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["deep", "neural", "machine", "translation", "models"], "offsets": [76, 4, 5, 6, 78]}, {"text": "block - scale collaboration mechanism", 
"nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["block", "-", "scale", "collaboration", "mechanism"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "spatial dependencies", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["spatial", "dependencies"], "offsets": [111, 112]}, {"text": "context - scale collaboration", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["context", "-", "scale", "collaboration"], "offsets": [115, 116, 117, 118]}], "trigger": {"text": "encoding", "tokens": ["encoding"], "offsets": [110]}}, {"event_type": "FIN", "arguments": [{"text": "easy", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["easy"], "offsets": [130]}, {"text": "obtain", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtain"], "offsets": [135]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [124]}}, {"event_type": "FAC", "arguments": [{"text": "msc nets", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multiscale", "collaborative", "nets"], "offsets": [32, 33, 128]}, {"text": "optimize", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["optimize"], "offsets": [132]}], "trigger": {"text": "easy", "tokens": ["easy"], "offsets": [130]}}, {"event_type": "FAC", "arguments": [{"text": "msc nets", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multiscale", "collaborative", "nets"], "offsets": [32, 33, 128]}, {"text": "from considerably increased depth", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "considerably", "increased", "depth"], "offsets": [140, 141, 142, 143]}, {"text": "improvements of translation quality", "nugget_type": "STR", "argument_type": "Object", "tokens": ["improvements", "of", "translation", "quality"], "offsets": [136, 137, 138, 139]}], "trigger": {"text": "obtain", "tokens": 
["obtain"], "offsets": [135]}}, {"event_type": "CMP", "arguments": [{"text": "surpass", "nugget_type": "STR", "argument_type": "Result", "tokens": ["surpass"], "offsets": [165]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [166, 167]}, {"text": "2 . 2 ~ + 3 . 1", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", ".", "2", "~", "+", "3", ".", "1"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177]}, {"text": "bleu points", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu", "points"], "offsets": [178, 179]}], "trigger": {"text": "surpass", "tokens": ["surpass"], "offsets": [165]}}, {"event_type": "FAC", "arguments": [{"text": "bleu score", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["bleu", "score"], "offsets": [189, 190]}, {"text": "30 . 56", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["30", ".", "56"], "offsets": [192, 193, 194]}, {"text": "on wmt14 english - to - german task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "wmt14", "english", "-", "to", "-", "german", "task"], "offsets": [195, 196, 197, 198, 199, 200, 201, 202]}, {"text": "multiscale collaborative", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multiscale", "collaborative"], "offsets": [32, 33]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [187]}}, {"event_type": "CMP", "arguments": [{"text": "multiscale collaborative", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multiscale", "collaborative"], "offsets": [32, 33]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [205]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [205]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [95]}, {"text": "fine - grained 
representation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["fine", "-", "grained", "representation"], "offsets": [102, 103, 104, 105]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [217]}, {"text": "source code", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["source", "code"], "offsets": [221, 222]}], "trigger": {"text": "included", "tokens": ["included"], "offsets": [219]}}], "document": ["recent", "evidence", "reveals", "that", "neural", "machine", "translation", "(", "nmt", ")", "models", "with", "deeper", "neural", "networks", "can", "be", "more", "effective", "but", "are", "difficult", "to", "train", ".", "in", "this", "paper", ",", "we", "present", "a", "multiscale", "collaborative", "(", "msc", ")", "framework", "to", "ease", "the", "training", "of", "nmt", "models", "that", "are", "substantially", "deeper", "than", "those", "used", "previously", ".", "we", "explicitly", "boost", "the", "gradient", "back", "-", "propagation", "from", "top", "to", "bottom", "levels", "by", "introducing", "a", "block", "-", "scale", "collaboration", "mechanism", "into", "deep", "nmt", "models", ".", "then", ",", "instead", "of", "forcing", "the", "whole", "encoder", "stack", "directly", "learns", "a", "desired", "representation", ",", "we", "let", "each", "encoder", "block", "learns", "a", "fine", "-", "grained", "representation", "and", "enhance", "it", "by", "encoding", "spatial", "dependencies", "using", "a", "context", "-", "scale", "collaboration", ".", "we", "provide", "empirical", "evidence", "showing", "that", "the", "msc", "nets", "are", "easy", "to", "optimize", "and", "can", "obtain", "improvements", "of", "translation", "quality", "from", "considerably", "increased", "depth", ".", "on", "iwslt", "translation", "tasks", "with", "three", "translation", "directions", ",", 
"our", "extremely", "deep", "models", "(", "with", "72", "-", "layer", "encoders", ")", "surpass", "strong", "baselines", "by", "+", "2", ".", "2", "~", "+", "3", ".", "1", "bleu", "points", ".", "in", "addition", ",", "our", "deep", "msc", "achieves", "a", "bleu", "score", "of", "30", ".", "56", "on", "wmt14", "english", "-", "to", "-", "german", "task", "that", "significantly", "outperforms", "state", "-", "of", "-", "the", "-", "art", "deep", "nmt", "models", ".", "we", "have", "included", "the", "source", "code", "in", "supplementary", "materials", "."]}, {"venue": "ACL", "title": "Discourse-Aware Neural Extractive Text Summarization", "abstract": "Recently BERT has been adopted for document encoding in state-of-the-art text summarization models. However, sentence-based extractive models often result in redundant or uninformative phrases in the extracted summaries. Also, long-range dependencies throughout a document are not well captured by BERT, which is pre-trained on sentence pairs instead of documents. To address these issues, we present a discourse-aware neural summarization model - DiscoBert. DiscoBert extracts sub-sentential discourse units (instead of sentences) as candidates for extractive selection on a finer granularity. To capture the long-range dependencies among discourse units, structural discourse graphs are constructed based on RST trees and coreference mentions, encoded with Graph Convolutional Networks. 
Experiments show that the proposed model outperforms state-of-the-art methods by a significant margin on popular summarization benchmarks compared to other BERT-base models.", "doc_id": "1053d9f4b807ebba3fa6c95058df16e3", "publication_year": 2020, "sentences": ["recently bert has been adopted for document encoding in state - of - the - art text summarization models .", "however , sentence - based extractive models often result in redundant or uninformative phrases in the extracted summaries .", "also , long - range dependencies throughout a document are not well captured by bert , which is pre - trained on sentence pairs instead of documents .", "to address these issues , we present a discourse - aware neural summarization model - discobert .", "discobert extracts sub - sentential discourse units ( instead of sentences ) as candidates for extractive selection on a finer granularity .", "to capture the long - range dependencies among discourse units , structural discourse graphs are constructed based on rst trees and coreference mentions , encoded with graph convolutional networks .", "experiments show that the proposed model outperforms state - of - the - art methods by a significant margin on popular summarization benchmarks compared to other bert - base models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "document encoding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "encoding"], "offsets": [6, 7]}], "trigger": {"text": "adopted", "tokens": ["adopted"], "offsets": [4]}}, {"event_type": "RWF", "arguments": [{"text": "sentence - based extractive models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["sentence", "-", "based", "extractive", "models"], "offsets": [22, 23, 24, 25, 26]}, {"text": "redundant phrases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["redundant", "phrases"], "offsets": [30, 33]}, {"text": "uninformative phrases", "nugget_type": "WEA", "argument_type": "Fault", "tokens": 
["uninformative", "phrases"], "offsets": [32, 33]}], "trigger": {"text": "result", "tokens": ["result"], "offsets": [28]}}, {"event_type": "RWF", "arguments": [{"text": "bert", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["bert"], "offsets": [53]}, {"text": "long - range dependencies", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["long", "-", "range", "dependencies"], "offsets": [41, 42, 43, 44]}], "trigger": {"text": "not well captured", "tokens": ["not", "well", "captured"], "offsets": [49, 50, 51]}}, {"event_type": "RWS", "arguments": [{"text": "bert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bert"], "offsets": [53]}, {"text": "sentence pairs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sentence", "pairs"], "offsets": [61, 62]}], "trigger": {"text": "pre - trained", "tokens": ["pre", "-", "trained"], "offsets": [57, 58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [72]}, {"text": "discourse - aware neural summarization model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["discourse", "-", "aware", "neural", "summarization", "model"], "offsets": [75, 76, 77, 78, 79, 80]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [73]}}, {"event_type": "MDS", "arguments": [{"text": "sub - sentential discourse units", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sub", "-", "sentential", "discourse", "units"], "offsets": [86, 87, 88, 89, 90]}, {"text": "candidates for extractive selection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["candidates", "for", "extractive", "selection"], "offsets": [97, 98, 99, 100]}], "trigger": {"text": "extracts", "tokens": ["extracts"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [107]}, 
{"text": "coreference mentions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["coreference", "mentions"], "offsets": [127, 128]}, {"text": "structural discourse graphs", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["structural", "discourse", "graphs"], "offsets": [117, 118, 119]}, {"text": "rst trees", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["rst", "trees"], "offsets": [124, 125]}], "trigger": {"text": "constructed", "tokens": ["constructed"], "offsets": [121]}}, {"event_type": "PUR", "arguments": [{"text": "long - range dependencies among discourse units", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["long", "-", "range", "dependencies", "among", "discourse", "units"], "offsets": [109, 110, 111, 112, 113, 114, 115]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [107]}}, {"event_type": "MDS", "arguments": [{"text": "convolutional networks", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["convolutional", "networks"], "offsets": [133, 134]}, {"text": "structural discourse graphs", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["structural", "discourse", "graphs"], "offsets": [117, 118, 119]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [130]}}, {"event_type": "CMP", "arguments": [{"text": "bert - base models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bert", "-", "base", "models"], "offsets": [162, 163, 164, 165]}, {"text": "discourse - aware neural summarization model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["discourse", "-", "aware", "neural", "summarization", "model"], "offsets": [75, 76, 77, 78, 79, 80]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [142]}, {"text": "significant margin", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "margin"], "offsets": [153, 154]}], 
"trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [142]}}], "document": ["recently", "bert", "has", "been", "adopted", "for", "document", "encoding", "in", "state", "-", "of", "-", "the", "-", "art", "text", "summarization", "models", ".", "however", ",", "sentence", "-", "based", "extractive", "models", "often", "result", "in", "redundant", "or", "uninformative", "phrases", "in", "the", "extracted", "summaries", ".", "also", ",", "long", "-", "range", "dependencies", "throughout", "a", "document", "are", "not", "well", "captured", "by", "bert", ",", "which", "is", "pre", "-", "trained", "on", "sentence", "pairs", "instead", "of", "documents", ".", "to", "address", "these", "issues", ",", "we", "present", "a", "discourse", "-", "aware", "neural", "summarization", "model", "-", "discobert", ".", "discobert", "extracts", "sub", "-", "sentential", "discourse", "units", "(", "instead", "of", "sentences", ")", "as", "candidates", "for", "extractive", "selection", "on", "a", "finer", "granularity", ".", "to", "capture", "the", "long", "-", "range", "dependencies", "among", "discourse", "units", ",", "structural", "discourse", "graphs", "are", "constructed", "based", "on", "rst", "trees", "and", "coreference", "mentions", ",", "encoded", "with", "graph", "convolutional", "networks", ".", "experiments", "show", "that", "the", "proposed", "model", "outperforms", "state", "-", "of", "-", "the", "-", "art", "methods", "by", "a", "significant", "margin", "on", "popular", "summarization", "benchmarks", "compared", "to", "other", "bert", "-", "base", "models", "."]}, {"venue": "ACL", "title": "Massively Multilingual Transfer for NER", "abstract": "In cross-lingual transfer, NLP models over one or more source languages are applied to a low-resource target language. While most prior work has used a single source model or a few carefully selected models, here we consider a \u201cmassive\u201d setting with many such models. 
This setting raises the problem of poor transfer, particularly from distant languages. We propose two techniques for modulating the transfer, suitable for zero-shot or few-shot learning, respectively. Evaluating on named entity recognition, we show that our techniques are much more effective than strong baselines, including standard ensembling, and our unsupervised method rivals oracle selection of the single best individual model.", "doc_id": "71365c1c17ba5cd25ff5e5dfc0f6c7f6", "publication_year": 2019, "sentences": ["in cross - lingual transfer , nlp models over one or more source languages are applied to a low - resource target language .", "while most prior work has used a single source model or a few carefully selected models , here we consider a \u201c massive \u201d setting with many such models .", "this setting raises the problem of poor transfer , particularly from distant languages .", "we propose two techniques for modulating the transfer , suitable for zero - shot or few - shot learning , respectively .", "evaluating on named entity recognition , we show that our techniques are much more effective than strong baselines , including standard ensembling , and our unsupervised method rivals oracle selection of the single best individual model ."], "events": [{"event_type": "ITT", "arguments": [{"text": "cross - lingual transfer", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "lingual", "transfer"], "offsets": [1, 2, 3, 4]}], "trigger": {"text": "applied", "tokens": ["applied"], "offsets": [15]}}, {"event_type": "RWF", "arguments": [{"text": "\u201c massive \u201d setting", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["\u201c", "massive", "\u201d", "setting"], "offsets": [45, 46, 47, 48]}, {"text": "poor transfer", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poor", "transfer"], "offsets": [60, 61]}], "trigger": {"text": "raises", "tokens": ["raises"], "offsets": [56]}}, {"event_type": "PRP", 
"arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "two techniques", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "techniques"], "offsets": [70, 71]}, {"text": "modulating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["modulating"], "offsets": [73]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "transfer", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["transfer"], "offsets": [75]}], "trigger": {"text": "modulating", "tokens": ["modulating"], "offsets": [73]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [96]}, {"text": "named entity recognition", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["named", "entity", "recognition"], "offsets": [92, 93, 94]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [96]}, {"text": "much more effective", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["much", "more", "effective"], "offsets": [102, 103, 104]}, {"text": "rivals", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["rivals"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [97]}}, {"event_type": "CMP", "arguments": [{"text": "two techniques", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["two", "techniques"], "offsets": [70, 71]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [106, 107]}], "trigger": {"text": "much more effective", "tokens": ["much", "more", "effective"], "offsets": [102, 103, 104]}}, {"event_type": "CMP", "arguments": [{"text": "unsupervised method", 
"nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "method"], "offsets": [115, 116]}, {"text": "single best individual model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["single", "best", "individual", "model"], "offsets": [122, 123, 124, 125]}], "trigger": {"text": "rivals", "tokens": ["rivals"], "offsets": [117]}}], "document": ["in", "cross", "-", "lingual", "transfer", ",", "nlp", "models", "over", "one", "or", "more", "source", "languages", "are", "applied", "to", "a", "low", "-", "resource", "target", "language", ".", "while", "most", "prior", "work", "has", "used", "a", "single", "source", "model", "or", "a", "few", "carefully", "selected", "models", ",", "here", "we", "consider", "a", "\u201c", "massive", "\u201d", "setting", "with", "many", "such", "models", ".", "this", "setting", "raises", "the", "problem", "of", "poor", "transfer", ",", "particularly", "from", "distant", "languages", ".", "we", "propose", "two", "techniques", "for", "modulating", "the", "transfer", ",", "suitable", "for", "zero", "-", "shot", "or", "few", "-", "shot", "learning", ",", "respectively", ".", "evaluating", "on", "named", "entity", "recognition", ",", "we", "show", "that", "our", "techniques", "are", "much", "more", "effective", "than", "strong", "baselines", ",", "including", "standard", "ensembling", ",", "and", "our", "unsupervised", "method", "rivals", "oracle", "selection", "of", "the", "single", "best", "individual", "model", "."]}, {"venue": "ACL", "title": "A Model-agnostic Data Manipulation Method for Persona-based Dialogue Generation", "abstract": "Towards building intelligent dialogue agents, there has been a growing interest in introducing explicit personas in generation models. However, with limited persona-based dialogue data at hand, it may be difficult to train a dialogue generation model well. 
We point out that the data challenges of this generation task lie in two aspects: first, it is expensive to scale up current persona-based dialogue datasets; second, each data sample in this task is more complex to learn with than conventional dialogue data. To alleviate the above data issues, we propose a data manipulation method, which is model-agnostic to be packed with any persona-based dialogue generation model to improve their performance. The original training samples will first be distilled and thus expected to be fitted more easily. Next, we show various effective ways that can diversify such easier distilled data. A given base model will then be trained via the constructed data curricula, i.e. first on augmented distilled samples and then on original ones. Experiments illustrate the superiority of our method with two strong base dialogue models (Transformer encoder-decoder and GPT2).", "doc_id": "96487048992c6461bccafe9d9ad6ed4b", "publication_year": 2022, "sentences": ["towards building intelligent dialogue agents , there has been a growing interest in introducing explicit personas in generation models .", "however , with limited persona - based dialogue data at hand , it may be difficult to train a dialogue generation model well .", "we point out that the data challenges of this generation task lie in two aspects : first , it is expensive to scale up current persona - based dialogue datasets ; second , each data sample in this task is more complex to learn with than conventional dialogue data .", "to alleviate the above data issues , we propose a data manipulation method , which is model - agnostic to be packed with any persona - based dialogue generation model to improve their performance .", "the original training samples will first be distilled and thus expected to be fitted more easily .", "next , we show various effective ways that can diversify such easier distilled data .", "a given base model will then be trained via the constructed data 
curricula , i . e . first on augmented distilled samples and then on original ones .", "experiments illustrate the superiority of our method with two strong base dialogue models ( transformer encoder - decoder and gpt2 ) ."], "events": [{"event_type": "ITT", "arguments": [{"text": "intelligent dialogue agents", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["intelligent", "dialogue", "agents"], "offsets": [2, 3, 4]}], "trigger": {"text": "building", "tokens": ["building"], "offsets": [1]}}, {"event_type": "RWF", "arguments": [{"text": "limited", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["limited"], "offsets": [23]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [23]}}, {"event_type": "RWF", "arguments": [{"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [64]}, {"text": "persona - based dialogue datasets", "nugget_type": "DST", "argument_type": "Concern", "tokens": ["persona", "-", "based", "dialogue", "datasets"], "offsets": [69, 70, 71, 72, 73]}], "trigger": {"text": "scale up", "tokens": ["scale", "up"], "offsets": [66, 67]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [101]}, {"text": "data manipulation method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["data", "manipulation", "method"], "offsets": [104, 105, 106]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [95]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [102]}}, {"event_type": "PUR", "arguments": [{"text": "data issues", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["data", "issues"], "offsets": [98, 99]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [95]}}, {"event_type": "WKS", "arguments": [{"text": "original training samples", "nugget_type": "FEA", "argument_type": 
"Content", "tokens": ["original", "training", "samples"], "offsets": [130, 131, 132]}], "trigger": {"text": "distilled", "tokens": ["distilled"], "offsets": [136]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [148]}, {"text": "various effective ways", "nugget_type": "APP", "argument_type": "Content", "tokens": ["various", "effective", "ways"], "offsets": [150, 151, 152]}, {"text": "diversify", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["diversify"], "offsets": [155]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "with two strong base dialogue models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "two", "strong", "base", "dialogue", "models"], "offsets": [197, 198, 199, 200, 201, 202]}, {"text": "superiority of our method", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["superiority", "of", "our", "method"], "offsets": [193, 194, 195, 196]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [191]}}, {"event_type": "RWF", "arguments": [{"text": "data sample", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["data", "sample"], "offsets": [78, 79]}], "trigger": {"text": "more complex to learn", "tokens": ["more", "complex", "to", "learn"], "offsets": [84, 85, 86, 87]}}, {"event_type": "PUR", "arguments": [{"text": "easier distilled data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["easier", "distilled", "data"], "offsets": [157, 158, 159]}], "trigger": {"text": "diversify", "tokens": ["diversify"], "offsets": [155]}}, {"event_type": "MDS", "arguments": [{"text": "given base model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["given", "base", "model"], "offsets": [162, 163, 164]}, {"text": "constructed data curricula", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": 
["constructed", "data", "curricula"], "offsets": [171, 172, 173]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [168]}}], "document": ["towards", "building", "intelligent", "dialogue", "agents", ",", "there", "has", "been", "a", "growing", "interest", "in", "introducing", "explicit", "personas", "in", "generation", "models", ".", "however", ",", "with", "limited", "persona", "-", "based", "dialogue", "data", "at", "hand", ",", "it", "may", "be", "difficult", "to", "train", "a", "dialogue", "generation", "model", "well", ".", "we", "point", "out", "that", "the", "data", "challenges", "of", "this", "generation", "task", "lie", "in", "two", "aspects", ":", "first", ",", "it", "is", "expensive", "to", "scale", "up", "current", "persona", "-", "based", "dialogue", "datasets", ";", "second", ",", "each", "data", "sample", "in", "this", "task", "is", "more", "complex", "to", "learn", "with", "than", "conventional", "dialogue", "data", ".", "to", "alleviate", "the", "above", "data", "issues", ",", "we", "propose", "a", "data", "manipulation", "method", ",", "which", "is", "model", "-", "agnostic", "to", "be", "packed", "with", "any", "persona", "-", "based", "dialogue", "generation", "model", "to", "improve", "their", "performance", ".", "the", "original", "training", "samples", "will", "first", "be", "distilled", "and", "thus", "expected", "to", "be", "fitted", "more", "easily", ".", "next", ",", "we", "show", "various", "effective", "ways", "that", "can", "diversify", "such", "easier", "distilled", "data", ".", "a", "given", "base", "model", "will", "then", "be", "trained", "via", "the", "constructed", "data", "curricula", ",", "i", ".", "e", ".", "first", "on", "augmented", "distilled", "samples", "and", "then", "on", "original", "ones", ".", "experiments", "illustrate", "the", "superiority", "of", "our", "method", "with", "two", "strong", "base", "dialogue", "models", "(", "transformer", "encoder", "-", "decoder", "and", "gpt2", ")", "."]}, 
{"venue": "ACL", "title": "Nested Named Entity Recognition as Latent Lexicalized Constituency Parsing", "abstract": "Nested named entity recognition (NER) has been receiving increasing attention. Recently, Fu et al. (2020) adapt a span-based constituency parser to tackle nested NER. They treat nested entities as partially-observed constituency trees and propose the masked inside algorithm for partial marginalization. However, their method cannot leverage entity heads, which have been shown useful in entity mention detection and entity typing. In this work, we resort to more expressive structures, lexicalized constituency trees in which constituents are annotated by headwords, to model nested entities. We leverage the Eisner-Satta algorithm to perform partial marginalization and inference efficiently.In addition, we propose to use (1) a two-stage strategy (2) a head regularization loss and (3) a head-aware labeling loss in order to enhance the performance. We make a thorough ablation study to investigate the functionality of each component. Experimentally, our method achieves the state-of-the-art performance on ACE2004, ACE2005 and NNE, and competitive performance on GENIA, and meanwhile has a fast inference speed.", "doc_id": "ee6a090f552b9951e3c99702f7373c79", "publication_year": 2022, "sentences": ["nested named entity recognition ( ner ) has been receiving increasing attention .", "recently , fu et al . 
( 2020 ) adapt a span - based constituency parser to tackle nested ner .", "they treat nested entities as partially - observed constituency trees and propose the masked inside algorithm for partial marginalization .", "however , their method cannot leverage entity heads , which have been shown useful in entity mention detection and entity typing .", "in this work , we resort to more expressive structures , lexicalized constituency trees in which constituents are annotated by headwords , to model nested entities .", "we leverage the eisner - satta algorithm to perform partial marginalization and inference efficiently .", "in addition , we propose to use ( 1 ) a two - stage strategy ( 2 ) a head regularization loss and ( 3 ) a head - aware labeling loss in order to enhance the performance .", "we make a thorough ablation study to investigate the functionality of each component .", "experimentally , our method achieves the state - of - the - art performance on ace2004 , ace2005 and nne , and competitive performance on genia , and meanwhile has a fast inference speed ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nested named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nested", "named", "entity", "recognition"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "receiving", "tokens": ["receiving"], "offsets": [9]}}, {"event_type": "RWS", "arguments": [{"text": "nested entities", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["nested", "entities"], "offsets": [36, 37]}, {"text": "partially - observed constituency trees", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["partially", "-", "observed", "constituency", "trees"], "offsets": [39, 40, 41, 42, 43]}], "trigger": {"text": "treat", "tokens": ["treat"], "offsets": [35]}}, {"event_type": "RWS", "arguments": [{"text": "masked inside algorithm", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["masked", "inside", 
"algorithm"], "offsets": [47, 48, 49]}, {"text": "partial marginalization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["partial", "marginalization"], "offsets": [51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [45]}}, {"event_type": "RWF", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["method"], "offsets": [57]}, {"text": "cannot leverage", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "leverage"], "offsets": [58, 59]}], "trigger": {"text": "cannot leverage", "tokens": ["cannot", "leverage"], "offsets": [58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [80]}, {"text": "lexicalized constituency trees", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["lexicalized", "constituency", "trees"], "offsets": [87, 88, 89]}], "trigger": {"text": "resort", "tokens": ["resort"], "offsets": [81]}}, {"event_type": "MDS", "arguments": [{"text": "constituents", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["constituents"], "offsets": [92]}, {"text": "headwords", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["headwords"], "offsets": [96]}, {"text": "model", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["model"], "offsets": [99]}], "trigger": {"text": "annotated", "tokens": ["annotated"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "nested entities", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["nested", "entities"], "offsets": [100, 101]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [99]}}, {"event_type": "MDS", "arguments": [{"text": "eisner - satta algorithm", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["eisner", "-", "satta", "algorithm"], "offsets": [106, 107, 108, 109]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": 
"Target", "tokens": ["perform"], "offsets": [111]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [104]}}, {"event_type": "PUR", "arguments": [{"text": "partial marginalization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["partial", "marginalization"], "offsets": [112, 113]}, {"text": "inference", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["inference"], "offsets": [115]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [111]}}, {"event_type": "PUR", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance"], "offsets": [155]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [153]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [157]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [164]}, {"text": "thorough ablation study", "nugget_type": "APP", "argument_type": "Content", "tokens": ["thorough", "ablation", "study"], "offsets": [160, 161, 162]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [158]}}, {"event_type": "PUR", "arguments": [{"text": "functionality of each component", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["functionality", "of", "each", "component"], "offsets": [166, 167, 168, 169]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [164]}}, {"event_type": "FAC", "arguments": [{"text": "ace2004", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace2004"], "offsets": [186]}, {"text": "ace2005", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace2005"], "offsets": [188]}, {"text": "nne", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["nne"], "offsets": [190]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["state", 
"-", "of", "-", "the", "-", "art", "performance"], "offsets": [177, 178, 179, 180, 181, 182, 183, 184]}, {"text": "competitive performance", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["competitive", "performance"], "offsets": [193, 194]}, {"text": "genia", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["genia"], "offsets": [196]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [175]}}, {"event_type": "FAC", "arguments": [{"text": "method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["method"], "offsets": [174]}, {"text": "fast inference speed", "nugget_type": "STR", "argument_type": "Object", "tokens": ["fast", "inference", "speed"], "offsets": [202, 203, 204]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [200]}}, {"event_type": "WKS", "arguments": [{"text": "two - stage strategy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "-", "stage", "strategy"], "offsets": [129, 130, 131, 132]}, {"text": "head regularization loss", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["head", "regularization", "loss"], "offsets": [137, 138, 139]}, {"text": "head - aware labeling loss", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["head", "-", "aware", "labeling", "loss"], "offsets": [145, 146, 147, 148, 149]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enhance"], "offsets": [153]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [121]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [124]}}], "document": ["nested", "named", "entity", "recognition", "(", "ner", ")", "has", "been", "receiving", "increasing", "attention", ".", "recently", ",", "fu", "et", "al", ".", "(", "2020", ")", "adapt", "a", "span", "-", "based", "constituency", "parser", "to", "tackle", "nested", "ner", ".", "they", "treat", "nested", "entities", "as", "partially", "-", 
"observed", "constituency", "trees", "and", "propose", "the", "masked", "inside", "algorithm", "for", "partial", "marginalization", ".", "however", ",", "their", "method", "cannot", "leverage", "entity", "heads", ",", "which", "have", "been", "shown", "useful", "in", "entity", "mention", "detection", "and", "entity", "typing", ".", "in", "this", "work", ",", "we", "resort", "to", "more", "expressive", "structures", ",", "lexicalized", "constituency", "trees", "in", "which", "constituents", "are", "annotated", "by", "headwords", ",", "to", "model", "nested", "entities", ".", "we", "leverage", "the", "eisner", "-", "satta", "algorithm", "to", "perform", "partial", "marginalization", "and", "inference", "efficiently", ".", "in", "addition", ",", "we", "propose", "to", "use", "(", "1", ")", "a", "two", "-", "stage", "strategy", "(", "2", ")", "a", "head", "regularization", "loss", "and", "(", "3", ")", "a", "head", "-", "aware", "labeling", "loss", "in", "order", "to", "enhance", "the", "performance", ".", "we", "make", "a", "thorough", "ablation", "study", "to", "investigate", "the", "functionality", "of", "each", "component", ".", "experimentally", ",", "our", "method", "achieves", "the", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "ace2004", ",", "ace2005", "and", "nne", ",", "and", "competitive", "performance", "on", "genia", ",", "and", "meanwhile", "has", "a", "fast", "inference", "speed", "."]}, {"venue": "ACL", "title": "Masking Actor Information Leads to Fairer Political Claims Detection", "abstract": "A central concern in Computational Social Sciences (CSS) is fairness: where the role of NLP is to scale up text analysis to large corpora, the quality of automatic analyses should be as independent as possible of textual properties. 
We analyze the performance of a state-of-the-art neural model on the task of political claims detection (i.e., the identification of forward-looking statements made by political actors) and identify a strong frequency bias: claims made by frequent actors are recognized better. We propose two simple debiasing methods which mask proper names and pronouns during training of the model, thus removing personal information bias. We find that (a) these methods significantly decrease frequency bias while keeping the overall performance stable; and (b) the resulting models improve when evaluated in an out-of-domain setting.", "doc_id": "ffe72c2b2dcb7cfc3198fb039b080230", "publication_year": 2020, "sentences": ["a central concern in computational social sciences ( css ) is fairness : where the role of nlp is to scale up text analysis to large corpora , the quality of automatic analyses should be as independent as possible of textual properties .", "we analyze the performance of a state - of - the - art neural model on the task of political claims detection ( i . e . 
, the identification of forward - looking statements made by political actors ) and identify a strong frequency bias : claims made by frequent actors are recognized better .", "we propose two simple debiasing methods which mask proper names and pronouns during training of the model , thus removing personal information bias .", "we find that ( a ) these methods significantly decrease frequency bias while keeping the overall performance stable ; and ( b ) the resulting models improve when evaluated in an out - of - domain setting ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [43]}, {"text": "performance of a state - of - the - art neural model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["performance", "of", "a", "state", "-", "of", "-", "the", "-", "art", "neural", "model"], "offsets": [46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57]}, {"text": "political claims detection", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["political", "claims", "detection"], "offsets": [62, 63, 64]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [44]}}, {"event_type": "ITT", "arguments": [{"text": "computational social sciences", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["computational", "social", "sciences"], "offsets": [4, 5, 6]}], "trigger": {"text": "fairness", "tokens": ["fairness"], "offsets": [11]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [99]}, {"text": "two simple debiasing methods", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "simple", "debiasing", "methods"], "offsets": [101, 102, 103, 104]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [100]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": 
[123]}, {"text": "significantly decrease", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significantly", "decrease"], "offsets": [131, 132]}, {"text": "improve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improve"], "offsets": [149]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [124]}}, {"event_type": "FAC", "arguments": [{"text": "two simple debiasing methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "simple", "debiasing", "methods"], "offsets": [101, 102, 103, 104]}, {"text": "frequency bias", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["frequency", "bias"], "offsets": [133, 134]}, {"text": "while keeping the overall performance stable", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "keeping", "the", "overall", "performance", "stable"], "offsets": [135, 136, 137, 138, 139, 140]}], "trigger": {"text": "significantly decrease", "tokens": ["significantly", "decrease"], "offsets": [131, 132]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [43]}, {"text": "strong frequency bias", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["strong", "frequency", "bias"], "offsets": [86, 87, 88]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "during training of the model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "training", "of", "the", "model"], "offsets": [111, 112, 113, 114, 115]}, {"text": "proper names and pronouns", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["proper", "names", "and", "pronouns"], "offsets": [107, 108, 109, 110]}, {"text": "removing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["removing"], "offsets": [118]}], "trigger": {"text": "mask", "tokens": ["mask"], "offsets": [106]}}, {"event_type": 
"PUR", "arguments": [{"text": "personal information bias", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["personal", "information", "bias"], "offsets": [119, 120, 121]}], "trigger": {"text": "removing", "tokens": ["removing"], "offsets": [118]}}, {"event_type": "FAC", "arguments": [{"text": "when evaluated in an out - of - domain setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "evaluated", "in", "an", "out", "-", "of", "-", "domain", "setting"], "offsets": [150, 151, 152, 153, 154, 155, 156, 157, 158, 159]}, {"text": "resulting models", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["resulting", "models"], "offsets": [147, 148]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [149]}}], "document": ["a", "central", "concern", "in", "computational", "social", "sciences", "(", "css", ")", "is", "fairness", ":", "where", "the", "role", "of", "nlp", "is", "to", "scale", "up", "text", "analysis", "to", "large", "corpora", ",", "the", "quality", "of", "automatic", "analyses", "should", "be", "as", "independent", "as", "possible", "of", "textual", "properties", ".", "we", "analyze", "the", "performance", "of", "a", "state", "-", "of", "-", "the", "-", "art", "neural", "model", "on", "the", "task", "of", "political", "claims", "detection", "(", "i", ".", "e", ".", ",", "the", "identification", "of", "forward", "-", "looking", "statements", "made", "by", "political", "actors", ")", "and", "identify", "a", "strong", "frequency", "bias", ":", "claims", "made", "by", "frequent", "actors", "are", "recognized", "better", ".", "we", "propose", "two", "simple", "debiasing", "methods", "which", "mask", "proper", "names", "and", "pronouns", "during", "training", "of", "the", "model", ",", "thus", "removing", "personal", "information", "bias", ".", "we", "find", "that", "(", "a", ")", "these", "methods", "significantly", "decrease", "frequency", "bias", "while", "keeping", "the", "overall", "performance", 
"stable", ";", "and", "(", "b", ")", "the", "resulting", "models", "improve", "when", "evaluated", "in", "an", "out", "-", "of", "-", "domain", "setting", "."]}, {"venue": "ACL", "title": "The patient is more dead than alive: exploring the current state of the multi-document summarisation of the biomedical literature", "abstract": "Although multi-document summarisation (MDS) of the biomedical literature is a highly valuable task that has recently attracted substantial interest, evaluation of the quality of biomedical summaries lacks consistency and transparency. In this paper, we examine the summaries generated by two current models in order to understand the deficiencies of existing evaluation approaches in the context of the challenges that arise in the MDS task. Based on this analysis, we propose a new approach to human evaluation and identify several challenges that must be overcome to develop effective biomedical MDS systems.", "doc_id": "3fcc85d18397dd8acdcc63a844f2bdb4", "publication_year": 2022, "sentences": ["although multi - document summarisation ( mds ) of the biomedical literature is a highly valuable task that has recently attracted substantial interest , evaluation of the quality of biomedical summaries lacks consistency and transparency .", "in this paper , we examine the summaries generated by two current models in order to understand the deficiencies of existing evaluation approaches in the context of the challenges that arise in the mds task .", "based on this analysis , we propose a new approach to human evaluation and identify several challenges that must be overcome to develop effective biomedical mds systems ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multi - document summarisation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multi", "-", "document", "summarisation"], "offsets": [1, 2, 3, 4]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [16]}}, {"event_type": "RWF", "arguments": [{"text": 
"consistency and transparency", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["consistency", "and", "transparency"], "offsets": [32, 33, 34]}, {"text": "evaluation of the quality of biomedical summaries", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["evaluation", "of", "the", "quality", "of", "biomedical", "summaries"], "offsets": [24, 25, 26, 27, 28, 29, 30]}], "trigger": {"text": "lacks", "tokens": ["lacks"], "offsets": [31]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [40]}, {"text": "summaries generated by two current models", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["summaries", "generated", "by", "two", "current", "models"], "offsets": [43, 44, 45, 46, 47, 48]}, {"text": "understand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understand"], "offsets": [52]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [41]}}, {"event_type": "PUR", "arguments": [{"text": "deficiencies of existing evaluation approaches", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["deficiencies", "of", "existing", "evaluation", "approaches"], "offsets": [54, 55, 56, 57, 58]}, {"text": "in the context of the challenges that arise in the mds task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "context", "of", "the", "challenges", "that", "arise", "in", "the", "multi", "-", "document", "summarisation", "task"], "offsets": [59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 1, 2, 3, 4, 70]}], "trigger": {"text": "understand", "tokens": ["understand"], "offsets": [52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [77]}, {"text": "new approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["new", "approach"], "offsets": [80, 81]}, {"text": "human evaluation", "nugget_type": "TAK", 
"argument_type": "Target", "tokens": ["human", "evaluation"], "offsets": [83, 84]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [77]}, {"text": "several challenges", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["several", "challenges"], "offsets": [87, 88]}, {"text": "develop", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["develop"], "offsets": [94]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [86]}}, {"event_type": "PUR", "arguments": [{"text": "effective biomedical mds systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["effective", "biomedical", "multi", "-", "document", "summarisation", "systems"], "offsets": [95, 96, 1, 2, 3, 4, 98]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [94]}}], "document": ["although", "multi", "-", "document", "summarisation", "(", "mds", ")", "of", "the", "biomedical", "literature", "is", "a", "highly", "valuable", "task", "that", "has", "recently", "attracted", "substantial", "interest", ",", "evaluation", "of", "the", "quality", "of", "biomedical", "summaries", "lacks", "consistency", "and", "transparency", ".", "in", "this", "paper", ",", "we", "examine", "the", "summaries", "generated", "by", "two", "current", "models", "in", "order", "to", "understand", "the", "deficiencies", "of", "existing", "evaluation", "approaches", "in", "the", "context", "of", "the", "challenges", "that", "arise", "in", "the", "mds", "task", ".", "based", "on", "this", "analysis", ",", "we", "propose", "a", "new", "approach", "to", "human", "evaluation", "and", "identify", "several", "challenges", "that", "must", "be", "overcome", "to", "develop", "effective", "biomedical", "mds", "systems", "."]}, {"venue": "ACL", "title": "Careful Selection of Knowledge to Solve Open Book Question Answering", 
"abstract": "Open book question answering is a type of natural language based QA (NLQA) where questions are expected to be answered with respect to a given set of open book facts, and common knowledge about a topic. Recently a challenge involving such QA, OpenBookQA, has been proposed. Unlike most other NLQA that focus on linguistic understanding, OpenBookQA requires deeper reasoning involving linguistic understanding as well as reasoning with common knowledge. In this paper we address QA with respect to the OpenBookQA dataset and combine state of the art language models with abductive information retrieval (IR), information gain based re-ranking, passage selection and weighted scoring to achieve 72.0% accuracy, an 11.6% improvement over the current state of the art.", "doc_id": "16f4868e97fe3cf8dcf09a4465793f63", "publication_year": 2019, "sentences": ["open book question answering is a type of natural language based qa ( nlqa ) where questions are expected to be answered with respect to a given set of open book facts , and common knowledge about a topic .", "recently a challenge involving such qa , openbookqa , has been proposed .", "unlike most other nlqa that focus on linguistic understanding , openbookqa requires deeper reasoning involving linguistic understanding as well as reasoning with common knowledge .", "in this paper we address qa with respect to the openbookqa dataset and combine state of the art language models with abductive information retrieval ( ir ) , information gain based re - ranking , passage selection and weighted scoring to achieve 72 . 0 % accuracy , an 11 . 
6 % improvement over the current state of the art ."], "events": [{"event_type": "ITT", "arguments": [{"text": "open book question answering", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["open", "book", "question", "answering"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "type", "tokens": ["type"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "deeper reasoning", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["deeper", "reasoning"], "offsets": [65, 66]}, {"text": "open book question answering", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["open", "book", "question", "answering"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [81]}, {"text": "openbookqa dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["openbookqa", "dataset"], "offsets": [88, 89]}, {"text": "question answering", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["question", "answering"], "offsets": [2, 3]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [82]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [81]}, {"text": "state of the art language models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["state", "of", "the", "art", "language", "models"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "abductive information retrieval", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["abductive", "information", "retrieval"], "offsets": [99, 100, 101]}, {"text": "information gain based re - ranking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["information", "gain", "based", "re", "-", "ranking"], "offsets": [106, 107, 108, 109, 110, 111]}, {"text": "passage selection", 
"nugget_type": "TAK", "argument_type": "Target", "tokens": ["passage", "selection"], "offsets": [113, 114]}, {"text": "weighted scoring", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["weighted", "scoring"], "offsets": [116, 117]}], "trigger": {"text": "combine", "tokens": ["combine"], "offsets": [91]}}, {"event_type": "CMP", "arguments": [{"text": "72 . 0 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["72", ".", "0", "%"], "offsets": [120, 121, 122, 123]}, {"text": "accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["accuracy"], "offsets": [124]}, {"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [131]}, {"text": "current state of the art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "of", "the", "art"], "offsets": [134, 135, 136, 137, 138]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [119]}}, {"event_type": "CMP", "arguments": [{"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [131]}, {"text": "11 . 
6 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["11", ".", "6", "%"], "offsets": [127, 128, 129, 130]}, {"text": "current state of the art", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["current", "state", "of", "the", "art"], "offsets": [134, 135, 136, 137, 138]}], "trigger": {"text": "improvement", "tokens": ["improvement"], "offsets": [131]}}], "document": ["open", "book", "question", "answering", "is", "a", "type", "of", "natural", "language", "based", "qa", "(", "nlqa", ")", "where", "questions", "are", "expected", "to", "be", "answered", "with", "respect", "to", "a", "given", "set", "of", "open", "book", "facts", ",", "and", "common", "knowledge", "about", "a", "topic", ".", "recently", "a", "challenge", "involving", "such", "qa", ",", "openbookqa", ",", "has", "been", "proposed", ".", "unlike", "most", "other", "nlqa", "that", "focus", "on", "linguistic", "understanding", ",", "openbookqa", "requires", "deeper", "reasoning", "involving", "linguistic", "understanding", "as", "well", "as", "reasoning", "with", "common", "knowledge", ".", "in", "this", "paper", "we", "address", "qa", "with", "respect", "to", "the", "openbookqa", "dataset", "and", "combine", "state", "of", "the", "art", "language", "models", "with", "abductive", "information", "retrieval", "(", "ir", ")", ",", "information", "gain", "based", "re", "-", "ranking", ",", "passage", "selection", "and", "weighted", "scoring", "to", "achieve", "72", ".", "0", "%", "accuracy", ",", "an", "11", ".", "6", "%", "improvement", "over", "the", "current", "state", "of", "the", "art", "."]}, {"venue": "ACL", "title": "KinyaBERT: a Morphology-aware Kinyarwanda Language Model", "abstract": "Pre-trained language models such as BERT have been successful at tackling many natural language processing tasks. However, the unsupervised sub-word tokenization methods commonly used in these models (e.g., byte-pair encoding - BPE) are sub-optimal at handling morphologically rich languages. 
Even given a morphological analyzer, naive sequencing of morphemes into a standard BERT architecture is inefficient at capturing morphological compositionality and expressing word-relative syntactic regularities. We address these challenges by proposing a simple yet effective two-tier BERT architecture that leverages a morphological analyzer and explicitly represents morphological compositionality.Despite the success of BERT, most of its evaluations have been conducted on high-resource languages, obscuring its applicability on low-resource languages. We evaluate our proposed method on the low-resource morphologically rich Kinyarwanda language, naming the proposed model architecture KinyaBERT. A robust set of experimental results reveal that KinyaBERT outperforms solid baselines by 2% in F1 score on a named entity recognition task and by 4.3% in average score of a machine-translated GLUE benchmark. KinyaBERT fine-tuning has better convergence and achieves more robust results on multiple tasks even in the presence of translation noise.", "doc_id": "951e575d891235a7f3dd38e48dcb9d2f", "publication_year": 2022, "sentences": ["pre - trained language models such as bert have been successful at tackling many natural language processing tasks .", "however , the unsupervised sub - word tokenization methods commonly used in these models ( e . g . 
, byte - pair encoding - bpe ) are sub - optimal at handling morphologically rich languages .", "even given a morphological analyzer , naive sequencing of morphemes into a standard bert architecture is inefficient at capturing morphological compositionality and expressing word - relative syntactic regularities .", "we address these challenges by proposing a simple yet effective two - tier bert architecture that leverages a morphological analyzer and explicitly represents morphological compositionality .", "despite the success of bert , most of its evaluations have been conducted on high - resource languages , obscuring its applicability on low - resource languages .", "we evaluate our proposed method on the low - resource morphologically rich kinyarwanda language , naming the proposed model architecture kinyabert .", "a robust set of experimental results reveal that kinyabert outperforms solid baselines by 2 % in f1 score on a named entity recognition task and by 4 . 3 % in average score of a machine - translated glue benchmark .", "kinyabert fine - tuning has better convergence and achieves more robust results on multiple tasks even in the presence of translation noise ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing", "tasks"], "offsets": [14, 15, 16, 17]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "unsupervised sub - word tokenization methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["unsupervised", "sub", "-", "word", "tokenization", "methods"], "offsets": [22, 23, 24, 25, 26, 27]}, {"text": "sub - optimal", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["sub", "-", "optimal"], "offsets": [47, 48, 49]}], "trigger": {"text": "sub - optimal", "tokens": ["sub", "-", "optimal"], "offsets": [47, 48, 49]}}, {"event_type": 
"PUR", "arguments": [{"text": "morphologically rich languages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["morphologically", "rich", "languages"], "offsets": [52, 53, 54]}], "trigger": {"text": "handling", "tokens": ["handling"], "offsets": [51]}}, {"event_type": "RWS", "arguments": [{"text": "morphological analyzer", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["morphological", "analyzer"], "offsets": [59, 60]}, {"text": "morphemes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["morphemes"], "offsets": [65]}, {"text": "standard bert architecture", "nugget_type": "APP", "argument_type": "Target", "tokens": ["standard", "bert", "architecture"], "offsets": [68, 69, 70]}], "trigger": {"text": "sequencing", "tokens": ["sequencing"], "offsets": [63]}}, {"event_type": "RWF", "arguments": [{"text": "inefficient", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inefficient"], "offsets": [72]}, {"text": "capturing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capturing"], "offsets": [74]}], "trigger": {"text": "inefficient", "tokens": ["inefficient"], "offsets": [72]}}, {"event_type": "PUR", "arguments": [{"text": "morphological compositionality", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["morphological", "compositionality"], "offsets": [75, 76]}], "trigger": {"text": "capturing", "tokens": ["capturing"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "word - relative syntactic regularities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["word", "-", "relative", "syntactic", "regularities"], "offsets": [79, 80, 81, 82, 83]}], "trigger": {"text": "expressing", "tokens": ["expressing"], "offsets": [78]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [85]}, {"text": "two - tier bert architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["two", "-", "tier", "bert", "architecture"], "offsets": [95, 96, 97, 98, 99]}, {"text": "leverages", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["leverages"], "offsets": [101]}, {"text": "represents", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represents"], "offsets": [107]}], "trigger": {"text": "proposing", "tokens": ["proposing"], "offsets": [90]}}, {"event_type": "PUR", "arguments": [{"text": "morphological analyzer", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["morphological", "analyzer"], "offsets": [103, 104]}], "trigger": {"text": "leverages", "tokens": ["leverages"], "offsets": [101]}}, {"event_type": "PUR", "arguments": [{"text": "morphological compositionality", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["morphological", "compositionality"], "offsets": [108, 109]}], "trigger": {"text": "represents", "tokens": ["represents"], "offsets": [107]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [139]}, {"text": "two - tier bert architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "-", "tier", "bert", "architecture"], "offsets": [95, 96, 97, 98, 99]}, {"text": "on the low - resource morphologically rich kinyarwanda language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "low", "-", "resource", "morphologically", "rich", "kinyarwanda", "language"], "offsets": [144, 145, 146, 147, 148, 149, 150, 151, 152]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [140]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [139]}, {"text": "proposed model architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["proposed", "model", "architecture"], "offsets": [156, 157, 158]}, {"text": "kinyabert", "nugget_type": "TAK", "argument_type": 
"Target", "tokens": ["kinyabert"], "offsets": [159]}], "trigger": {"text": "naming", "tokens": ["naming"], "offsets": [154]}}, {"event_type": "CMP", "arguments": [{"text": "kinyabert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["kinyabert"], "offsets": [169]}, {"text": "solid baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["solid", "baselines"], "offsets": [171, 172]}, {"text": "2 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", "%"], "offsets": [174, 175]}, {"text": "f1 score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["f1", "score"], "offsets": [177, 178]}, {"text": "on a named entity recognition task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "a", "named", "entity", "recognition", "task"], "offsets": [179, 180, 181, 182, 183, 184]}, {"text": "4 . 3 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["4", ".", "3", "%"], "offsets": [187, 188, 189, 190]}, {"text": "average score", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["average", "score"], "offsets": [192, 193]}, {"text": "of a machine - translated glue benchmark", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["of", "a", "machine", "-", "translated", "glue", "benchmark"], "offsets": [194, 195, 196, 197, 198, 199, 200]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [170]}}, {"event_type": "FAC", "arguments": [{"text": "kinyabert fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["kinyabert", "fine", "-", "tuning"], "offsets": [202, 203, 204, 205]}, {"text": "better convergence", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "convergence"], "offsets": [207, 208]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [206]}}, {"event_type": "FAC", "arguments": [{"text": "kinyabert fine - tuning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["kinyabert", "fine", "-", 
"tuning"], "offsets": [202, 203, 204, 205]}, {"text": "more robust results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["more", "robust", "results"], "offsets": [211, 212, 213]}, {"text": "multiple tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multiple", "tasks"], "offsets": [215, 216]}, {"text": "in the presence of translation noise", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "presence", "of", "translation", "noise"], "offsets": [218, 219, 220, 221, 222, 223]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [210]}}], "document": ["pre", "-", "trained", "language", "models", "such", "as", "bert", "have", "been", "successful", "at", "tackling", "many", "natural", "language", "processing", "tasks", ".", "however", ",", "the", "unsupervised", "sub", "-", "word", "tokenization", "methods", "commonly", "used", "in", "these", "models", "(", "e", ".", "g", ".", ",", "byte", "-", "pair", "encoding", "-", "bpe", ")", "are", "sub", "-", "optimal", "at", "handling", "morphologically", "rich", "languages", ".", "even", "given", "a", "morphological", "analyzer", ",", "naive", "sequencing", "of", "morphemes", "into", "a", "standard", "bert", "architecture", "is", "inefficient", "at", "capturing", "morphological", "compositionality", "and", "expressing", "word", "-", "relative", "syntactic", "regularities", ".", "we", "address", "these", "challenges", "by", "proposing", "a", "simple", "yet", "effective", "two", "-", "tier", "bert", "architecture", "that", "leverages", "a", "morphological", "analyzer", "and", "explicitly", "represents", "morphological", "compositionality", ".", "despite", "the", "success", "of", "bert", ",", "most", "of", "its", "evaluations", "have", "been", "conducted", "on", "high", "-", "resource", "languages", ",", "obscuring", "its", "applicability", "on", "low", "-", "resource", "languages", ".", "we", "evaluate", "our", "proposed", "method", "on", "the", "low", 
"-", "resource", "morphologically", "rich", "kinyarwanda", "language", ",", "naming", "the", "proposed", "model", "architecture", "kinyabert", ".", "a", "robust", "set", "of", "experimental", "results", "reveal", "that", "kinyabert", "outperforms", "solid", "baselines", "by", "2", "%", "in", "f1", "score", "on", "a", "named", "entity", "recognition", "task", "and", "by", "4", ".", "3", "%", "in", "average", "score", "of", "a", "machine", "-", "translated", "glue", "benchmark", ".", "kinyabert", "fine", "-", "tuning", "has", "better", "convergence", "and", "achieves", "more", "robust", "results", "on", "multiple", "tasks", "even", "in", "the", "presence", "of", "translation", "noise", "."]}, {"venue": "ACL", "title": "CIL: Contrastive Instance Learning Framework for Distantly Supervised Relation Extraction", "abstract": "The journey of reducing noise from distant supervision (DS) generated training data has been started since the DS was first introduced into the relation extraction (RE) task. For the past decade, researchers apply the multi-instance learning (MIL) framework to find the most reliable feature from a bag of sentences. Although the pattern of MIL bags can greatly reduce DS noise, it fails to represent many other useful sentence features in the datasets. In many cases, these sentence features can only be acquired by extra sentence-level human annotation with heavy costs. Therefore, the performance of distantly supervised RE models is bounded. In this paper, we go beyond typical MIL framework and propose a novel contrastive instance learning (CIL) framework. Specifically, we regard the initial MIL as the relational triple encoder and constraint positive pairs against negative pairs for each instance. 
Experiments demonstrate the effectiveness of our proposed framework, with significant improvements over the previous methods on NYT10, GDS and KBP.", "doc_id": "014e3abf6474c8b1c5b5fbff67098d1b", "publication_year": 2021, "sentences": ["the journey of reducing noise from distant supervision ( ds ) generated training data has been started since the ds was first introduced into the relation extraction ( re ) task .", "for the past decade , researchers apply the multi - instance learning ( mil ) framework to find the most reliable feature from a bag of sentences .", "although the pattern of mil bags can greatly reduce ds noise , it fails to represent many other useful sentence features in the datasets .", "in many cases , these sentence features can only be acquired by extra sentence - level human annotation with heavy costs .", "therefore , the performance of distantly supervised re models is bounded .", "in this paper , we go beyond typical mil framework and propose a novel contrastive instance learning ( cil ) framework .", "specifically , we regard the initial mil as the relational triple encoder and constraint positive pairs against negative pairs for each instance .", "experiments demonstrate the effectiveness of our proposed framework , with significant improvements over the previous methods on nyt10 , gds and kbp ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multi - instance learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["multi", "-", "instance", "learning"], "offsets": [40, 41, 42, 43]}], "trigger": {"text": "apply", "tokens": ["apply"], "offsets": [38]}}, {"event_type": "RWF", "arguments": [{"text": "fails", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fails"], "offsets": [73]}, {"text": "represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represent"], "offsets": [75]}], "trigger": {"text": "fails", "tokens": ["fails"], "offsets": [73]}}, {"event_type": "RWF", "arguments": 
[{"text": "sentence features", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["sentence", "features"], "offsets": [90, 91]}, {"text": "extra sentence - level human annotation with heavy costs", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["extra", "sentence", "-", "level", "human", "annotation", "with", "heavy", "costs"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104, 105]}], "trigger": {"text": "acquired", "tokens": ["acquired"], "offsets": [95]}}, {"event_type": "RWF", "arguments": [{"text": "bounded", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["bounded"], "offsets": [117]}], "trigger": {"text": "bounded", "tokens": ["bounded"], "offsets": [117]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [123]}, {"text": "contrastive instance learning ( cil ) framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["contrastive", "instance", "learning", "framework"], "offsets": [133, 134, 135, 139]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [130]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [143]}, {"text": "initial mil", "nugget_type": "APP", "argument_type": "Content", "tokens": ["initial", "mil"], "offsets": [146, 147]}, {"text": "relational triple encoder", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["relational", "triple", "encoder"], "offsets": [150, 151, 152]}], "trigger": {"text": "regard", "tokens": ["regard"], "offsets": [144]}}, {"event_type": "FIN", "arguments": [{"text": "significant improvements", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significant", "improvements"], "offsets": [174, 175]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [165]}}, {"event_type": "CMP", "arguments": [{"text": "effectiveness", "nugget_type": "TAK", 
"argument_type": "Metrics", "tokens": ["effectiveness"], "offsets": [167]}, {"text": "contrastive instance learning ( cil ) framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["contrastive", "instance", "learning", "framework"], "offsets": [133, 134, 135, 139]}, {"text": "significant improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significant", "improvements"], "offsets": [174, 175]}], "trigger": {"text": "significant improvements", "tokens": ["significant", "improvements"], "offsets": [174, 175]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [143]}, {"text": "positive pairs against negative pairs", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["positive", "pairs", "against", "negative", "pairs"], "offsets": [155, 156, 157, 158, 159]}, {"text": "each instance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["each", "instance"], "offsets": [161, 162]}], "trigger": {"text": "constraint", "tokens": ["constraint"], "offsets": [154]}}, {"event_type": "PUR", "arguments": [{"text": "many other useful sentence features", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["many", "other", "useful", "sentence", "features"], "offsets": [76, 77, 78, 79, 80]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [75]}}], "document": ["the", "journey", "of", "reducing", "noise", "from", "distant", "supervision", "(", "ds", ")", "generated", "training", "data", "has", "been", "started", "since", "the", "ds", "was", "first", "introduced", "into", "the", "relation", "extraction", "(", "re", ")", "task", ".", "for", "the", "past", "decade", ",", "researchers", "apply", "the", "multi", "-", "instance", "learning", "(", "mil", ")", "framework", "to", "find", "the", "most", "reliable", "feature", "from", "a", "bag", "of", "sentences", ".", "although", "the", "pattern", "of", "mil", "bags", "can", 
"greatly", "reduce", "ds", "noise", ",", "it", "fails", "to", "represent", "many", "other", "useful", "sentence", "features", "in", "the", "datasets", ".", "in", "many", "cases", ",", "these", "sentence", "features", "can", "only", "be", "acquired", "by", "extra", "sentence", "-", "level", "human", "annotation", "with", "heavy", "costs", ".", "therefore", ",", "the", "performance", "of", "distantly", "supervised", "re", "models", "is", "bounded", ".", "in", "this", "paper", ",", "we", "go", "beyond", "typical", "mil", "framework", "and", "propose", "a", "novel", "contrastive", "instance", "learning", "(", "cil", ")", "framework", ".", "specifically", ",", "we", "regard", "the", "initial", "mil", "as", "the", "relational", "triple", "encoder", "and", "constraint", "positive", "pairs", "against", "negative", "pairs", "for", "each", "instance", ".", "experiments", "demonstrate", "the", "effectiveness", "of", "our", "proposed", "framework", ",", "with", "significant", "improvements", "over", "the", "previous", "methods", "on", "nyt10", ",", "gds", "and", "kbp", "."]}, {"venue": "ACL", "title": "Multi-Agent Task-Oriented Dialog Policy Learning with Role-Aware Reward Decomposition", "abstract": "Many studies have applied reinforcement learning to train a dialog policy and show great promise these years. One common approach is to employ a user simulator to obtain a large number of simulated user experiences for reinforcement learning algorithms. However, modeling a realistic user simulator is challenging. A rule-based simulator requires heavy domain expertise for complex tasks, and a data-driven simulator requires considerable data and it is even unclear how to evaluate a simulator. To avoid explicitly building a user simulator beforehand, we propose Multi-Agent Dialog Policy Learning, which regards both the system and the user as the dialog agents. Two agents interact with each other and are jointly learned simultaneously. 
The method uses the actor-critic framework to facilitate pretraining and improve scalability. We also propose Hybrid Value Network for the role-aware reward decomposition to integrate role-specific domain knowledge of each agent in the task-oriented dialog. Results show that our method can successfully build a system policy and a user policy simultaneously, and two agents can achieve a high task success rate through conversational interaction.", "doc_id": "fdf49fe19f924724f3bcdcd0be2f19a5", "publication_year": 2020, "sentences": ["many studies have applied reinforcement learning to train a dialog policy and show great promise these years .", "one common approach is to employ a user simulator to obtain a large number of simulated user experiences for reinforcement learning algorithms .", "however , modeling a realistic user simulator is challenging .", "a rule - based simulator requires heavy domain expertise for complex tasks , and a data - driven simulator requires considerable data and it is even unclear how to evaluate a simulator .", "to avoid explicitly building a user simulator beforehand , we propose multi - agent dialog policy learning , which regards both the system and the user as the dialog agents .", "two agents interact with each other and are jointly learned simultaneously .", "the method uses the actor - critic framework to facilitate pretraining and improve scalability .", "we also propose hybrid value network for the role - aware reward decomposition to integrate role - specific domain knowledge of each agent in the task - oriented dialog .", "results show that our method can successfully build a system policy and a user policy simultaneously , and two agents can achieve a high task success rate through conversational interaction ."], "events": [{"event_type": "ITT", "arguments": [{"text": "dialog policy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialog", "policy"], "offsets": [9, 10]}], "trigger": {"text": "train", "tokens": 
["train"], "offsets": [7]}}, {"event_type": "RWS", "arguments": [{"text": "user simulator", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["user", "simulator"], "offsets": [25, 26]}, {"text": "obtain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["obtain"], "offsets": [28]}, {"text": "common approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["common", "approach"], "offsets": [19, 20]}], "trigger": {"text": "employ", "tokens": ["employ"], "offsets": [23]}}, {"event_type": "PUR", "arguments": [{"text": "simulated user experiences for reinforcement learning algorithms", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["simulated", "user", "experiences", "for", "reinforcement", "learning", "algorithms"], "offsets": [33, 34, 35, 36, 37, 38, 39]}], "trigger": {"text": "obtain", "tokens": ["obtain"], "offsets": [28]}}, {"event_type": "RWF", "arguments": [{"text": "realistic user simulator", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["realistic", "user", "simulator"], "offsets": [45, 46, 47]}, {"text": "challenging", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["challenging"], "offsets": [49]}], "trigger": {"text": "modeling", "tokens": ["modeling"], "offsets": [43]}}, {"event_type": "RWF", "arguments": [{"text": "rule - based simulator", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["rule", "-", "based", "simulator"], "offsets": [52, 53, 54, 55]}, {"text": "heavy domain expertise", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["heavy", "domain", "expertise"], "offsets": [57, 58, 59]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [56]}}, {"event_type": "RWF", "arguments": [{"text": "data - driven simulator", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["data", "-", "driven", "simulator"], "offsets": [66, 67, 68, 69]}, {"text": "considerable data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": 
["considerable", "data"], "offsets": [71, 72]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [70]}}, {"event_type": "RWF", "arguments": [{"text": "simulator", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["simulator"], "offsets": [82]}, {"text": "unclear", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unclear"], "offsets": [77]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [80]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [93]}, {"text": "multi - agent dialog policy learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["multi", "-", "agent", "dialog", "policy", "learning"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "avoid explicitly building", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["avoid", "explicitly", "building"], "offsets": [85, 86, 87]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [94]}}, {"event_type": "PUR", "arguments": [{"text": "user simulator", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["user", "simulator"], "offsets": [89, 90]}], "trigger": {"text": "avoid explicitly building", "tokens": ["avoid", "explicitly", "building"], "offsets": [85, 86, 87]}}, {"event_type": "MDS", "arguments": [{"text": "actor - critic framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["actor", "-", "critic", "framework"], "offsets": [131, 132, 133, 134]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [136]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [139]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [129]}}, {"event_type": "PUR", "arguments": [{"text": "pretraining", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["pretraining"], "offsets": [137]}], 
"trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [136]}}, {"event_type": "PUR", "arguments": [{"text": "scalability", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["scalability"], "offsets": [140]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [139]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [142]}, {"text": "hybrid value network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hybrid", "value", "network"], "offsets": [145, 146, 147]}, {"text": "role - aware reward decomposition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["role", "-", "aware", "reward", "decomposition"], "offsets": [150, 151, 152, 153, 154]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [144]}}, {"event_type": "FIN", "arguments": [{"text": "successfully build", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["successfully", "build"], "offsets": [178, 179]}, {"text": "achieve", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieve"], "offsets": [193]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [173]}}, {"event_type": "FAC", "arguments": [{"text": "multi - agent dialog policy learning", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multi", "-", "agent", "dialog", "policy", "learning"], "offsets": [95, 96, 97, 98, 99, 100]}, {"text": "simultaneously", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["simultaneously"], "offsets": [187]}, {"text": "system policy", "nugget_type": "APP", "argument_type": "Object", "tokens": ["system", "policy"], "offsets": [181, 182]}, {"text": "user policy", "nugget_type": "APP", "argument_type": "Object", "tokens": ["user", "policy"], "offsets": [185, 186]}], "trigger": {"text": "successfully build", "tokens": ["successfully", "build"], "offsets": [178, 179]}}, {"event_type": "FAC", "arguments": 
[{"text": "high task success rate", "nugget_type": "STR", "argument_type": "Object", "tokens": ["high", "task", "success", "rate"], "offsets": [195, 196, 197, 198]}, {"text": "through conversational interaction", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "conversational", "interaction"], "offsets": [199, 200, 201]}, {"text": "system policy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["system", "policy"], "offsets": [181, 182]}, {"text": "user policy", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["user", "policy"], "offsets": [185, 186]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [193]}}, {"event_type": "MDS", "arguments": [{"text": "system", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["system"], "offsets": [106]}, {"text": "user", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["user"], "offsets": [109]}, {"text": "dialog agents", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["dialog", "agents"], "offsets": [112, 113]}], "trigger": {"text": "regards", "tokens": ["regards"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "two agents", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["two", "agents"], "offsets": [115, 116]}, {"text": "with each other", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "each", "other"], "offsets": [118, 119, 120]}], "trigger": {"text": "interact", "tokens": ["interact"], "offsets": [117]}}, {"event_type": "WKS", "arguments": [{"text": "simultaneously", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["simultaneously"], "offsets": [125]}, {"text": "two agents", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["two", "agents"], "offsets": [115, 116]}], "trigger": {"text": "jointly learned", "tokens": ["jointly", "learned"], "offsets": [123, 124]}}, {"event_type": "MDS", "arguments": [{"text": "role - specific 
domain knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["role", "-", "specific", "domain", "knowledge"], "offsets": [157, 158, 159, 160, 161]}, {"text": "each agent", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "agent"], "offsets": [163, 164]}, {"text": "in the task - oriented dialog", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "task", "-", "oriented", "dialog"], "offsets": [165, 166, 167, 168, 169, 170]}], "trigger": {"text": "integrate", "tokens": ["integrate"], "offsets": [156]}}], "document": ["many", "studies", "have", "applied", "reinforcement", "learning", "to", "train", "a", "dialog", "policy", "and", "show", "great", "promise", "these", "years", ".", "one", "common", "approach", "is", "to", "employ", "a", "user", "simulator", "to", "obtain", "a", "large", "number", "of", "simulated", "user", "experiences", "for", "reinforcement", "learning", "algorithms", ".", "however", ",", "modeling", "a", "realistic", "user", "simulator", "is", "challenging", ".", "a", "rule", "-", "based", "simulator", "requires", "heavy", "domain", "expertise", "for", "complex", "tasks", ",", "and", "a", "data", "-", "driven", "simulator", "requires", "considerable", "data", "and", "it", "is", "even", "unclear", "how", "to", "evaluate", "a", "simulator", ".", "to", "avoid", "explicitly", "building", "a", "user", "simulator", "beforehand", ",", "we", "propose", "multi", "-", "agent", "dialog", "policy", "learning", ",", "which", "regards", "both", "the", "system", "and", "the", "user", "as", "the", "dialog", "agents", ".", "two", "agents", "interact", "with", "each", "other", "and", "are", "jointly", "learned", "simultaneously", ".", "the", "method", "uses", "the", "actor", "-", "critic", "framework", "to", "facilitate", "pretraining", "and", "improve", "scalability", ".", "we", "also", "propose", "hybrid", "value", "network", "for", "the", "role", "-", "aware", "reward", "decomposition", 
"to", "integrate", "role", "-", "specific", "domain", "knowledge", "of", "each", "agent", "in", "the", "task", "-", "oriented", "dialog", ".", "results", "show", "that", "our", "method", "can", "successfully", "build", "a", "system", "policy", "and", "a", "user", "policy", "simultaneously", ",", "and", "two", "agents", "can", "achieve", "a", "high", "task", "success", "rate", "through", "conversational", "interaction", "."]}, {"venue": "ACL", "title": "Superbizarre Is Not Superb: Derivational Morphology Improves BERT\u2019s Interpretation of Complex Words", "abstract": "How does the input segmentation of pretrained language models (PLMs) affect their interpretations of complex words? We present the first study investigating this question, taking BERT as the example PLM and focusing on its semantic representations of English derivatives. We show that PLMs can be interpreted as serial dual-route models, i.e., the meanings of complex words are either stored or else need to be computed from the subwords, which implies that maximally meaningful input tokens should allow for the best generalization on new words. This hypothesis is confirmed by a series of semantic probing tasks on which DelBERT (Derivation leveraging BERT), a model with derivational input segmentation, substantially outperforms BERT with WordPiece segmentation. Our results suggest that the generalization capabilities of PLMs could be further improved if a morphologically-informed vocabulary of input tokens were used.", "doc_id": "f1c549e9415c7924e8c7e0e9b38ee43b", "publication_year": 2021, "sentences": ["how does the input segmentation of pretrained language models ( plms ) affect their interpretations of complex words ?", "we present the first study investigating this question , taking bert as the example plm and focusing on its semantic representations of english derivatives .", "we show that plms can be interpreted as serial dual - route models , i . e . 
, the meanings of complex words are either stored or else need to be computed from the subwords , which implies that maximally meaningful input tokens should allow for the best generalization on new words .", "this hypothesis is confirmed by a series of semantic probing tasks on which delbert ( derivation leveraging bert ) , a model with derivational input segmentation , substantially outperforms bert with wordpiece segmentation .", "our results suggest that the generalization capabilities of plms could be further improved if a morphologically - informed vocabulary of input tokens were used ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [19]}, {"text": "first study", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["first", "study"], "offsets": [22, 23]}, {"text": "investigating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigating"], "offsets": [24]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [20]}}, {"event_type": "PUR", "arguments": [{"text": "this question", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["input", "segmentation", "of", "pretrained", "language", "models", "affect", "their", "interpretations", "of", "complex", "words"], "offsets": [3, 4, 5, 6, 7, 8, 12, 13, 14, 15, 16, 17]}], "trigger": {"text": "investigating", "tokens": ["investigating"], "offsets": [24]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [44]}, {"text": "interpreted", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["interpreted"], "offsets": [50]}, {"text": "allow", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["allow"], "offsets": [89]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [45]}}, {"event_type": "FAC", "arguments": [{"text": "pretrained language models", "nugget_type": "APP", 
"argument_type": "Subject", "tokens": ["pretrained", "language", "models"], "offsets": [6, 7, 8]}, {"text": "serial dual - route models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["serial", "dual", "-", "route", "models"], "offsets": [52, 53, 54, 55, 56]}], "trigger": {"text": "interpreted", "tokens": ["interpreted"], "offsets": [50]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [126]}, {"text": "bert with wordpiece segmentation", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bert", "with", "wordpiece", "segmentation"], "offsets": [127, 128, 129, 130]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [126]}}, {"event_type": "FIN", "arguments": [{"text": "our", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["our"], "offsets": [132]}, {"text": "further improved", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["further", "improved"], "offsets": [143, 144]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "generalization capabilities of plms", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["generalization", "capabilities", "of", "pretrained", "language", "models"], "offsets": [137, 138, 139, 6, 7, 8]}, {"text": "if a morphologically - informed vocabulary of input tokens were used", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["if", "a", "morphologically", "-", "informed", "vocabulary", "of", "input", "tokens", "were", "used"], "offsets": [145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "further improved", "tokens": ["further", "improved"], "offsets": [143, 144]}}, {"event_type": "WKS", "arguments": [{"text": "semantic representations of english derivatives", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["semantic", "representations", 
"of", "english", "derivatives"], "offsets": [38, 39, 40, 41, 42]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [19]}], "trigger": {"text": "focusing on", "tokens": ["focusing", "on"], "offsets": [35, 36]}}, {"event_type": "FAC", "arguments": [{"text": "maximally meaningful input tokens", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["maximally", "meaningful", "input", "tokens"], "offsets": [84, 85, 86, 87]}, {"text": "best generalization on new words", "nugget_type": "STR", "argument_type": "Object", "tokens": ["best", "generalization", "on", "new", "words"], "offsets": [92, 93, 94, 95, 96]}], "trigger": {"text": "allow", "tokens": ["allow"], "offsets": [89]}}, {"event_type": "FAC", "arguments": [{"text": "this hypothesis", "nugget_type": "APP", "argument_type": "Object", "tokens": ["this", "hypothesis"], "offsets": [98, 99]}, {"text": "series of semantic probing tasks", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["series", "of", "semantic", "probing", "tasks"], "offsets": [104, 105, 106, 107, 108]}], "trigger": {"text": "confirmed", "tokens": ["confirmed"], "offsets": [101]}}], "document": ["how", "does", "the", "input", "segmentation", "of", "pretrained", "language", "models", "(", "plms", ")", "affect", "their", "interpretations", "of", "complex", "words", "?", "we", "present", "the", "first", "study", "investigating", "this", "question", ",", "taking", "bert", "as", "the", "example", "plm", "and", "focusing", "on", "its", "semantic", "representations", "of", "english", "derivatives", ".", "we", "show", "that", "plms", "can", "be", "interpreted", "as", "serial", "dual", "-", "route", "models", ",", "i", ".", "e", ".", ",", "the", "meanings", "of", "complex", "words", "are", "either", "stored", "or", "else", "need", "to", "be", "computed", "from", "the", "subwords", ",", "which", "implies", "that", "maximally", "meaningful", "input", "tokens", "should", "allow", "for", "the", 
"best", "generalization", "on", "new", "words", ".", "this", "hypothesis", "is", "confirmed", "by", "a", "series", "of", "semantic", "probing", "tasks", "on", "which", "delbert", "(", "derivation", "leveraging", "bert", ")", ",", "a", "model", "with", "derivational", "input", "segmentation", ",", "substantially", "outperforms", "bert", "with", "wordpiece", "segmentation", ".", "our", "results", "suggest", "that", "the", "generalization", "capabilities", "of", "plms", "could", "be", "further", "improved", "if", "a", "morphologically", "-", "informed", "vocabulary", "of", "input", "tokens", "were", "used", "."]}, {"venue": "ACL", "title": "Contextualized Weak Supervision for Text Classification", "abstract": "Weakly supervised text classification based on a few user-provided seed words has recently attracted much attention from researchers. Existing methods mainly generate pseudo-labels in a context-free manner (e.g., string matching), therefore, the ambiguous, context-dependent nature of human language has been long overlooked. In this paper, we propose a novel framework ConWea, providing contextualized weak supervision for text classification. Specifically, we leverage contextualized representations of word occurrences and seed word information to automatically differentiate multiple interpretations of the same word, and thus create a contextualized corpus. This contextualized corpus is further utilized to train the classifier and expand seed words in an iterative manner. This process not only adds new contextualized, highly label-indicative keywords but also disambiguates initial seed words, making our weak supervision fully contextualized. 
Extensive experiments and case studies on real-world datasets demonstrate the necessity and significant advantages of using contextualized weak supervision, especially when the class labels are fine-grained.", "doc_id": "e44724b65a1221f7af1786a9f5ff8a63", "publication_year": 2020, "sentences": ["weakly supervised text classification based on a few user - provided seed words has recently attracted much attention from researchers .", "existing methods mainly generate pseudo - labels in a context - free manner ( e . g . , string matching ) , therefore , the ambiguous , context - dependent nature of human language has been long overlooked .", "in this paper , we propose a novel framework conwea , providing contextualized weak supervision for text classification .", "specifically , we leverage contextualized representations of word occurrences and seed word information to automatically differentiate multiple interpretations of the same word , and thus create a contextualized corpus .", "this contextualized corpus is further utilized to train the classifier and expand seed words in an iterative manner .", "this process not only adds new contextualized , highly label - indicative keywords but also disambiguates initial seed words , making our weak supervision fully contextualized .", "extensive experiments and case studies on real - world datasets demonstrate the necessity and significant advantages of using contextualized weak supervision , especially when the class labels are fine - grained ."], "events": [{"event_type": "ITT", "arguments": [{"text": "weakly supervised text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["weakly", "supervised", "text", "classification"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "attracted", "tokens": ["attracted"], "offsets": [15]}}, {"event_type": "RWF", "arguments": [{"text": "long overlooked", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["long", "overlooked"], "offsets": [58, 59]}, 
{"text": "ambiguous nature of human language", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ambiguous", "nature", "of", "human", "language"], "offsets": [47, 52, 53, 54, 55]}], "trigger": {"text": "long overlooked", "tokens": ["long", "overlooked"], "offsets": [58, 59]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "conwea", "nugget_type": "APP", "argument_type": "Content", "tokens": ["conwea"], "offsets": [70]}, {"text": "providing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["providing"], "offsets": [72]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [66]}}, {"event_type": "PUR", "arguments": [{"text": "contextualized weak supervision", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["contextualized", "weak", "supervision"], "offsets": [73, 74, 75]}, {"text": "for text classification", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "text", "classification"], "offsets": [76, 77, 78]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [72]}}, {"event_type": "MDS", "arguments": [{"text": "contextualized representations of word occurrences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contextualized", "representations", "of", "word", "occurrences"], "offsets": [84, 85, 86, 87, 88]}, {"text": "seed word information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["seed", "word", "information"], "offsets": [90, 91, 92]}, {"text": "create", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["create"], "offsets": [105]}, {"text": "multiple interpretations of the same word", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["multiple", "interpretations", "of", "the", "same", "word"], "offsets": [96, 97, 98, 99, 100, 101]}], "trigger": {"text": "automatically differentiate", "tokens": 
["automatically", "differentiate"], "offsets": [94, 95]}}, {"event_type": "PUR", "arguments": [{"text": "contextualized corpus", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["contextualized", "corpus"], "offsets": [107, 108]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [105]}}, {"event_type": "MDS", "arguments": [{"text": "contextualized corpus", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["contextualized", "corpus"], "offsets": [111, 112]}, {"text": "train", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["train"], "offsets": [117]}, {"text": "expand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["expand"], "offsets": [121]}], "trigger": {"text": "utilized", "tokens": ["utilized"], "offsets": [115]}}, {"event_type": "PUR", "arguments": [{"text": "classifier", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["classifier"], "offsets": [119]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [117]}}, {"event_type": "PUR", "arguments": [{"text": "seed words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["seed", "words"], "offsets": [122, 123]}, {"text": "in an iterative manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "iterative", "manner"], "offsets": [124, 125, 126, 127]}], "trigger": {"text": "expand", "tokens": ["expand"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["real", "-", "world", "datasets"], "offsets": [162, 163, 164, 165]}, {"text": "necessity and significant advantages", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["necessity", "significant", "advantages"], "offsets": [168, 170, 171]}, {"text": "of using contextualized weak supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["of", "using", "contextualized", "weak", "supervision"], "offsets": [172, 173, 
174, 175, 176]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [166]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [21, 22]}, {"text": "pseudo - labels", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["pseudo", "-", "labels"], "offsets": [25, 26, 27]}, {"text": "in a context - free manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "context", "-", "free", "manner"], "offsets": [28, 29, 30, 31, 32, 33]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [24]}}, {"event_type": "MDS", "arguments": [{"text": "new contextualized , highly label - indicative keywords", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["new", "contextualized", ",", "highly", "label", "-", "indicative", "keywords"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141]}, {"text": "making", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["making"], "offsets": [149]}], "trigger": {"text": "adds", "tokens": ["adds"], "offsets": [133]}}, {"event_type": "MDS", "arguments": [{"text": "initial seed words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["initial", "seed", "words"], "offsets": [145, 146, 147]}, {"text": "making", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["making"], "offsets": [149]}], "trigger": {"text": "disambiguates", "tokens": ["disambiguates"], "offsets": [144]}}, {"event_type": "PUR", "arguments": [{"text": "weak supervision fully contextualized", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["weak", "supervision", "fully", "contextualized"], "offsets": [151, 152, 153, 154]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [149]}}], "document": ["weakly", "supervised", "text", "classification", "based", "on", "a", "few", "user", "-", "provided", "seed", "words", "has", 
"recently", "attracted", "much", "attention", "from", "researchers", ".", "existing", "methods", "mainly", "generate", "pseudo", "-", "labels", "in", "a", "context", "-", "free", "manner", "(", "e", ".", "g", ".", ",", "string", "matching", ")", ",", "therefore", ",", "the", "ambiguous", ",", "context", "-", "dependent", "nature", "of", "human", "language", "has", "been", "long", "overlooked", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "framework", "conwea", ",", "providing", "contextualized", "weak", "supervision", "for", "text", "classification", ".", "specifically", ",", "we", "leverage", "contextualized", "representations", "of", "word", "occurrences", "and", "seed", "word", "information", "to", "automatically", "differentiate", "multiple", "interpretations", "of", "the", "same", "word", ",", "and", "thus", "create", "a", "contextualized", "corpus", ".", "this", "contextualized", "corpus", "is", "further", "utilized", "to", "train", "the", "classifier", "and", "expand", "seed", "words", "in", "an", "iterative", "manner", ".", "this", "process", "not", "only", "adds", "new", "contextualized", ",", "highly", "label", "-", "indicative", "keywords", "but", "also", "disambiguates", "initial", "seed", "words", ",", "making", "our", "weak", "supervision", "fully", "contextualized", ".", "extensive", "experiments", "and", "case", "studies", "on", "real", "-", "world", "datasets", "demonstrate", "the", "necessity", "and", "significant", "advantages", "of", "using", "contextualized", "weak", "supervision", ",", "especially", "when", "the", "class", "labels", "are", "fine", "-", "grained", "."]}, {"venue": "ACL", "title": "An Empirical Study on Explanations in Out-of-Domain Settings", "abstract": "Recent work in Natural Language Processing has focused on developing approaches that extract faithful explanations, either via identifying the most important tokens in the input (i.e. 
post-hoc explanations) or by designing inherently faithful models that first select the most important tokens and then use them to predict the correct label (i.e. select-then-predict models). Currently, these approaches are largely evaluated on in-domain settings. Yet, little is known about how post-hoc explanations and inherently faithful models perform in out-of-domain settings. In this paper, we conduct an extensive empirical study that examines: (1) the out-of-domain faithfulness of post-hoc explanations, generated by five feature attribution methods; and (2) the out-of-domain performance of two inherently faithful models over six datasets. Contrary to our expectations, results show that in many cases out-of-domain post-hoc explanation faithfulness measured by sufficiency and comprehensiveness is higher compared to in-domain. We find this misleading and suggest using a random baseline as a yardstick for evaluating post-hoc explanation faithfulness. Our findings also show that select-then predict models demonstrate comparable predictive performance in out-of-domain settings to full-text trained models.", "doc_id": "e99adeec68628dd7ca381d64c4806687", "publication_year": 2022, "sentences": ["recent work in natural language processing has focused on developing approaches that extract faithful explanations , either via identifying the most important tokens in the input ( i . e . post - hoc explanations ) or by designing inherently faithful models that first select the most important tokens and then use them to predict the correct label ( i . e . 
select - then - predict models ) .", "currently , these approaches are largely evaluated on in - domain settings .", "yet , little is known about how post - hoc explanations and inherently faithful models perform in out - of - domain settings .", "in this paper , we conduct an extensive empirical study that examines : ( 1 ) the out - of - domain faithfulness of post - hoc explanations , generated by five feature attribution methods ; and ( 2 ) the out - of - domain performance of two inherently faithful models over six datasets .", "contrary to our expectations , results show that in many cases out - of - domain post - hoc explanation faithfulness measured by sufficiency and comprehensiveness is higher compared to in - domain .", "we find this misleading and suggest using a random baseline as a yardstick for evaluating post - hoc explanation faithfulness .", "our findings also show that select - then predict models demonstrate comparable predictive performance in out - of - domain settings to full - text trained models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing"], "offsets": [3, 4, 5]}], "trigger": {"text": "focused", "tokens": ["focused"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "post - hoc explanations", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["post", "-", "hoc", "explanations"], "offsets": [91, 92, 93, 94]}, {"text": "inherently faithful models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["inherently", "faithful", "models"], "offsets": [96, 97, 98]}, {"text": "in out - of - domain settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "out", "-", "of", "-", "domain", "settings"], "offsets": [100, 101, 102, 103, 104, 105, 106]}, {"text": "little is known", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["little", "is", 
"known"], "offsets": [86, 87, 88]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [99]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [112]}, {"text": "extensive empirical study", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "empirical", "study"], "offsets": [115, 116, 117]}, {"text": "examines", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["examines"], "offsets": [119]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [113]}}, {"event_type": "FIN", "arguments": [{"text": "compared", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["compared"], "offsets": [192]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [170]}}, {"event_type": "CMP", "arguments": [{"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [191]}, {"text": "in - domain", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["in", "-", "domain"], "offsets": [194, 195, 196]}, {"text": "out - of - domain post - hoc explanation", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["out", "-", "of", "-", "domain", "post", "-", "hoc", "explanation"], "offsets": [175, 176, 177, 178, 179, 180, 181, 182, 183]}, {"text": "sufficiency", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sufficiency"], "offsets": [187]}, {"text": "comprehensiveness", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["comprehensiveness"], "offsets": [189]}], "trigger": {"text": "compared", "tokens": ["compared"], "offsets": [192]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [198]}, {"text": "evaluating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["evaluating"], "offsets": [212]}, {"text": "random baseline", "nugget_type": "APP", "argument_type": 
"Content", "tokens": ["random", "baseline"], "offsets": [206, 207]}, {"text": "as a yardstick", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "a", "yardstick"], "offsets": [208, 209, 210]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [204]}}, {"event_type": "PUR", "arguments": [{"text": "post - hoc explanation faithfulness", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["post", "-", "hoc", "explanation", "faithfulness"], "offsets": [213, 214, 215, 216, 217]}], "trigger": {"text": "evaluating", "tokens": ["evaluating"], "offsets": [212]}}, {"event_type": "FIN", "arguments": [{"text": "demonstrate", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["demonstrate"], "offsets": [229]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [222]}}, {"event_type": "CMP", "arguments": [{"text": "full - text trained models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["full", "-", "text", "trained", "models"], "offsets": [241, 242, 243, 244, 245]}, {"text": "select - then predict models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["select", "-", "then", "predict", "models"], "offsets": [224, 225, 226, 227, 228]}, {"text": "in out - of - domain settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "out", "-", "of", "-", "domain", "settings"], "offsets": [233, 234, 235, 236, 237, 238, 239]}, {"text": "comparable predictive performance", "nugget_type": "STR", "argument_type": "Result", "tokens": ["comparable", "predictive", "performance"], "offsets": [230, 231, 232]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [229]}}, {"event_type": "PUR", "arguments": [{"text": "out - of - domain faithfulness of post - hoc explanations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["out", "-", "of", "-", "domain", "faithfulness", "of", "post", "-", "hoc", "explanations"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132, 
133, 134, 135]}, {"text": "out - of - domain performance of two inherently faithful models", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["out", "-", "of", "-", "domain", "performance", "of", "two", "inherently", "faithful", "models"], "offsets": [149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159]}], "trigger": {"text": "examines", "tokens": ["examines"], "offsets": [119]}}, {"event_type": "RWS", "arguments": [{"text": "most important tokens", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["most", "important", "tokens"], "offsets": [20, 21, 22]}, {"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input"], "offsets": [25]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [18]}}, {"event_type": "RWS", "arguments": [{"text": "inherently faithful models", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["inherently", "faithful", "models"], "offsets": [39, 40, 41]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [54]}, {"text": "most important tokens", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["most", "important", "tokens"], "offsets": [46, 47, 48]}], "trigger": {"text": "select", "tokens": ["select"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "correct label", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["correct", "label"], "offsets": [56, 57]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [54]}}, {"event_type": "FAC", "arguments": [{"text": "misleading", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["misleading"], "offsets": [201]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [199]}}], "document": ["recent", "work", "in", "natural", "language", "processing", "has", "focused", "on", "developing", "approaches", "that", "extract", "faithful", "explanations", ",", "either", "via", 
"identifying", "the", "most", "important", "tokens", "in", "the", "input", "(", "i", ".", "e", ".", "post", "-", "hoc", "explanations", ")", "or", "by", "designing", "inherently", "faithful", "models", "that", "first", "select", "the", "most", "important", "tokens", "and", "then", "use", "them", "to", "predict", "the", "correct", "label", "(", "i", ".", "e", ".", "select", "-", "then", "-", "predict", "models", ")", ".", "currently", ",", "these", "approaches", "are", "largely", "evaluated", "on", "in", "-", "domain", "settings", ".", "yet", ",", "little", "is", "known", "about", "how", "post", "-", "hoc", "explanations", "and", "inherently", "faithful", "models", "perform", "in", "out", "-", "of", "-", "domain", "settings", ".", "in", "this", "paper", ",", "we", "conduct", "an", "extensive", "empirical", "study", "that", "examines", ":", "(", "1", ")", "the", "out", "-", "of", "-", "domain", "faithfulness", "of", "post", "-", "hoc", "explanations", ",", "generated", "by", "five", "feature", "attribution", "methods", ";", "and", "(", "2", ")", "the", "out", "-", "of", "-", "domain", "performance", "of", "two", "inherently", "faithful", "models", "over", "six", "datasets", ".", "contrary", "to", "our", "expectations", ",", "results", "show", "that", "in", "many", "cases", "out", "-", "of", "-", "domain", "post", "-", "hoc", "explanation", "faithfulness", "measured", "by", "sufficiency", "and", "comprehensiveness", "is", "higher", "compared", "to", "in", "-", "domain", ".", "we", "find", "this", "misleading", "and", "suggest", "using", "a", "random", "baseline", "as", "a", "yardstick", "for", "evaluating", "post", "-", "hoc", "explanation", "faithfulness", ".", "our", "findings", "also", "show", "that", "select", "-", "then", "predict", "models", "demonstrate", "comparable", "predictive", "performance", "in", "out", "-", "of", "-", "domain", "settings", "to", "full", "-", "text", "trained", "models", "."]}, {"venue": "ACL", "title": "LinkBERT: Pretraining Language 
Models with Document Links", "abstract": "Language model (LM) pretraining captures various knowledge from text corpora, helping downstream tasks. However, existing methods such as BERT model a single document, and do not capture dependencies or knowledge that span across documents. In this work, we propose LinkBERT, an LM pretraining method that leverages links between documents, e.g., hyperlinks. Given a text corpus, we view it as a graph of documents and create LM inputs by placing linked documents in the same context. We then pretrain the LM with two joint self-supervised objectives: masked language modeling and our new proposal, document relation prediction. We show that LinkBERT outperforms BERT on various downstream tasks across two domains: the general domain (pretrained on Wikipedia with hyperlinks) and biomedical domain (pretrained on PubMed with citation links). LinkBERT is especially effective for multi-hop reasoning and few-shot QA (+5% absolute improvement on HotpotQA and TriviaQA), and our biomedical LinkBERT sets new states of the art on various BioNLP tasks (+7% on BioASQ and USMLE). We release our pretrained models, LinkBERT and BioLinkBERT, as well as code and data.", "doc_id": "03961911a5d092247400a628c456da74", "publication_year": 2022, "sentences": ["language model ( lm ) pretraining captures various knowledge from text corpora , helping downstream tasks .", "however , existing methods such as bert model a single document , and do not capture dependencies or knowledge that span across documents .", "in this work , we propose linkbert , an lm pretraining method that leverages links between documents , e . g . 
, hyperlinks .", "given a text corpus , we view it as a graph of documents and create lm inputs by placing linked documents in the same context .", "we then pretrain the lm with two joint self - supervised objectives : masked language modeling and our new proposal , document relation prediction .", "we show that linkbert outperforms bert on various downstream tasks across two domains : the general domain ( pretrained on wikipedia with hyperlinks ) and biomedical domain ( pretrained on pubmed with citation links ) .", "linkbert is especially effective for multi - hop reasoning and few - shot qa ( + 5 % absolute improvement on hotpotqa and triviaqa ) , and our biomedical linkbert sets new states of the art on various bionlp tasks ( + 7 % on bioasq and usmle ) .", "we release our pretrained models , linkbert and biolinkbert , as well as code and data ."], "events": [{"event_type": "ITT", "arguments": [{"text": "language model ( lm ) pretraining", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "model", "(", "lm", ")", "pretraining"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "captures", "tokens": ["captures"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "methods"], "offsets": [19, 20]}, {"text": "dependencies or knowledge", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["dependencies", "or", "knowledge"], "offsets": [33, 34, 35]}], "trigger": {"text": "do not capture", "tokens": ["do", "not", "capture"], "offsets": [30, 31, 32]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, {"text": "lm pretraining method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["language", "model", "pretraining", "method"], "offsets": [0, 1, 51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [46]}}, 
{"event_type": "MDS", "arguments": [{"text": "text corpus", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["text", "corpus"], "offsets": [68, 69]}, {"text": "graph of documents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["graph", "of", "documents"], "offsets": [76, 77, 78]}], "trigger": {"text": "view", "tokens": ["view"], "offsets": [72]}}, {"event_type": "MDS", "arguments": [{"text": "linked documents", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["linked", "documents"], "offsets": [85, 86]}, {"text": "same context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["same", "context"], "offsets": [89, 90]}, {"text": "create", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["create"], "offsets": [80]}], "trigger": {"text": "placing", "tokens": ["placing"], "offsets": [84]}}, {"event_type": "PUR", "arguments": [{"text": "lm inputs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["language", "model", "inputs"], "offsets": [0, 1, 82]}], "trigger": {"text": "create", "tokens": ["create"], "offsets": [80]}}, {"event_type": "MDS", "arguments": [{"text": "lm", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["lm"], "offsets": [96]}, {"text": "masked language modeling", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["masked", "language", "modeling"], "offsets": [105, 106, 107]}, {"text": "document relation prediction", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["document", "relation", "prediction"], "offsets": [113, 114, 115]}], "trigger": {"text": "pretrain", "tokens": ["pretrain"], "offsets": [94]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [117]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [121]}], "trigger": {"text": "show", "tokens": 
["show"], "offsets": [118]}}, {"event_type": "CMP", "arguments": [{"text": "linkbert", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["linkbert"], "offsets": [120]}, {"text": "bert", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["bert"], "offsets": [122]}, {"text": "various downstream tasks", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["various", "downstream", "tasks"], "offsets": [124, 125, 126]}, {"text": "across two domains", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "two", "domains"], "offsets": [127, 128, 129]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "linkbert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["linkbert"], "offsets": [153]}, {"text": "multi - hop reasoning and few - shot qa", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["multi", "-", "hop", "reasoning", "and", "few", "-", "shot", "qa"], "offsets": [158, 159, 160, 161, 162, 163, 164, 165, 166]}], "trigger": {"text": "especially effective", "tokens": ["especially", "effective"], "offsets": [155, 156]}}, {"event_type": "FAC", "arguments": [{"text": "biomedical linkbert", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["biomedical", "linkbert"], "offsets": [181, 182]}, {"text": "on various bionlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "various", "bionlp", "tasks"], "offsets": [189, 190, 191, 192]}, {"text": "new states of the art", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["new", "states", "of", "the", "art"], "offsets": [184, 185, 186, 187, 188]}], "trigger": {"text": "sets", "tokens": ["sets"], "offsets": [183]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [203]}, {"text": "linkbert and biolinkbert", "nugget_type": "APP", "argument_type": "Content", 
"tokens": ["linkbert", "and", "biolinkbert"], "offsets": [209, 210, 211]}, {"text": "code and data", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["code", "and", "data"], "offsets": [216, 217, 218]}], "trigger": {"text": "release", "tokens": ["release"], "offsets": [204]}}], "document": ["language", "model", "(", "lm", ")", "pretraining", "captures", "various", "knowledge", "from", "text", "corpora", ",", "helping", "downstream", "tasks", ".", "however", ",", "existing", "methods", "such", "as", "bert", "model", "a", "single", "document", ",", "and", "do", "not", "capture", "dependencies", "or", "knowledge", "that", "span", "across", "documents", ".", "in", "this", "work", ",", "we", "propose", "linkbert", ",", "an", "lm", "pretraining", "method", "that", "leverages", "links", "between", "documents", ",", "e", ".", "g", ".", ",", "hyperlinks", ".", "given", "a", "text", "corpus", ",", "we", "view", "it", "as", "a", "graph", "of", "documents", "and", "create", "lm", "inputs", "by", "placing", "linked", "documents", "in", "the", "same", "context", ".", "we", "then", "pretrain", "the", "lm", "with", "two", "joint", "self", "-", "supervised", "objectives", ":", "masked", "language", "modeling", "and", "our", "new", "proposal", ",", "document", "relation", "prediction", ".", "we", "show", "that", "linkbert", "outperforms", "bert", "on", "various", "downstream", "tasks", "across", "two", "domains", ":", "the", "general", "domain", "(", "pretrained", "on", "wikipedia", "with", "hyperlinks", ")", "and", "biomedical", "domain", "(", "pretrained", "on", "pubmed", "with", "citation", "links", ")", ".", "linkbert", "is", "especially", "effective", "for", "multi", "-", "hop", "reasoning", "and", "few", "-", "shot", "qa", "(", "+", "5", "%", "absolute", "improvement", "on", "hotpotqa", "and", "triviaqa", ")", ",", "and", "our", "biomedical", "linkbert", "sets", "new", "states", "of", "the", "art", "on", "various", "bionlp", "tasks", "(", "+", "7", "%", "on", 
"bioasq", "and", "usmle", ")", ".", "we", "release", "our", "pretrained", "models", ",", "linkbert", "and", "biolinkbert", ",", "as", "well", "as", "code", "and", "data", "."]}, {"venue": "ACL", "title": "Multilingual unsupervised sequence segmentation transfers to extremely low-resource languages", "abstract": "We show that unsupervised sequence-segmentation performance can be transferred to extremely low-resource languages by pre-training a Masked Segmental Language Model (Downey et al., 2021) multilingually. Further, we show that this transfer can be achieved by training over a collection of low-resource languages that are typologically similar (but phylogenetically unrelated) to the target language. In our experiments, we transfer from a collection of 10 Indigenous American languages (AmericasNLP, Mager et al., 2021) to K\u2019iche\u2019, a Mayan language. We compare our multilingual model to a monolingual (from-scratch) baseline, as well as a model pre-trained on Quechua only. We show that the multilingual pre-trained approach yields consistent segmentation quality across target dataset sizes, exceeding the monolingual baseline in 6/10 experimental settings. Our model yields especially strong results at small target sizes, including a zero-shot performance of 20.6 F1. These results have promising implications for low-resource NLP pipelines involving human-like linguistic units, such as the sparse transcription framework proposed by Bird (2020).", "doc_id": "e690a97cbe8e47e7b933a090eb255de4", "publication_year": 2022, "sentences": ["we show that unsupervised sequence - segmentation performance can be transferred to extremely low - resource languages by pre - training a masked segmental language model ( downey et al . 
, 2021 ) multilingually .", "further , we show that this transfer can be achieved by training over a collection of low - resource languages that are typologically similar ( but phylogenetically unrelated ) to the target language .", "in our experiments , we transfer from a collection of 10 indigenous american languages ( americasnlp , mager et al . , 2021 ) to k \u2019 iche \u2019 , a mayan language .", "we compare our multilingual model to a monolingual ( from - scratch ) baseline , as well as a model pre - trained on quechua only .", "we show that the multilingual pre - trained approach yields consistent segmentation quality across target dataset sizes , exceeding the monolingual baseline in 6 / 10 experimental settings .", "our model yields especially strong results at small target sizes , including a zero - shot performance of 20 . 6 f1 .", "these results have promising implications for low - resource nlp pipelines involving human - like linguistic units , such as the sparse transcription framework proposed by bird ( 2020 ) ."], "events": [{"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [0]}, {"text": "transferred", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["transferred"], "offsets": [10]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [1]}}, {"event_type": "FAC", "arguments": [{"text": "unsupervised sequence - segmentation performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["unsupervised", "sequence", "-", "segmentation", "performance"], "offsets": [3, 4, 5, 6, 7]}, {"text": "extremely low - resource languages", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["extremely", "low", "-", "resource", "languages"], "offsets": [12, 13, 14, 15, 16]}, {"text": "pre - training a masked segmental language model ( downey et al . 
, 2021 ) multilingually", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["pre", "-", "training", "a", "masked", "segmental", "language", "model", "(", "downey", "et", "al", ".", ",", "2021", ")", "multilingually"], "offsets": [18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34]}], "trigger": {"text": "transferred", "tokens": ["transferred"], "offsets": [10]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [38]}, {"text": "achieved", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieved"], "offsets": [45]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [39]}}, {"event_type": "FAC", "arguments": [{"text": "transfer", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["transfer"], "offsets": [42]}, {"text": "training over a collection of low - resource languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["training", "over", "a", "collection", "of", "low", "-", "resource", "languages"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54, 55]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [45]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "collection of 10 indigenous american languages", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["collection", "of", "10", "indigenous", "american", "languages"], "offsets": [78, 79, 80, 81, 82, 83]}, {"text": "mayan language", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["mayan", "language"], "offsets": [101, 102]}], "trigger": {"text": "transfer", "tokens": ["transfer"], "offsets": [75]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [104]}, {"text": "our multilingual model", "nugget_type": "APP", "argument_type": "Content", 
"tokens": ["our", "multilingual", "model"], "offsets": [106, 107, 108]}, {"text": "monolingual ( from - scratch ) baseline", "nugget_type": "APP", "argument_type": "Content", "tokens": ["monolingual", "(", "from", "-", "scratch", ")", "baseline"], "offsets": [111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [105]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [104]}, {"text": "our multilingual model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["our", "multilingual", "model"], "offsets": [106, 107, 108]}, {"text": "model pre - trained on quechua only", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model", "pre", "-", "trained", "on", "quechua", "only"], "offsets": [123, 124, 125, 126, 127, 128, 129]}], "trigger": {"text": "compare", "tokens": ["compare"], "offsets": [105]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [131]}, {"text": "yields", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["yields"], "offsets": [140]}, {"text": "exceeding", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["exceeding"], "offsets": [149]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "multilingual pre - trained approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["multilingual", "pre", "-", "trained", "approach"], "offsets": [135, 136, 137, 138, 139]}, {"text": "consistent segmentation quality", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["consistent", "segmentation", "quality"], "offsets": [141, 142, 143]}, {"text": "across target dataset sizes", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "target", "dataset", "sizes"], "offsets": [144, 145, 146, 147]}], "trigger": 
{"text": "yields", "tokens": ["yields"], "offsets": [140]}}, {"event_type": "CMP", "arguments": [{"text": "multilingual pre - trained approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["multilingual", "pre", "-", "trained", "approach"], "offsets": [135, 136, 137, 138, 139]}, {"text": "exceeding", "nugget_type": "STR", "argument_type": "Result", "tokens": ["exceeding"], "offsets": [149]}, {"text": "monolingual baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["monolingual", "baseline"], "offsets": [151, 152]}], "trigger": {"text": "exceeding", "tokens": ["exceeding"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "our multilingual model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["our", "multilingual", "model"], "offsets": [106, 107, 108]}, {"text": "especially strong results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["especially", "strong", "results"], "offsets": [163, 164, 165]}, {"text": "at small target sizes", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "small", "target", "sizes"], "offsets": [166, 167, 168, 169]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [162]}}, {"event_type": "FAC", "arguments": [{"text": "especially strong results", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["especially", "strong", "results"], "offsets": [163, 164, 165]}, {"text": "promising implications", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["promising", "implications"], "offsets": [186, 187]}, {"text": "low - resource nlp pipelines involving human - like linguistic units", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["low", "-", "resource", "nlp", "pipelines", "involving", "human", "-", "like", "linguistic", "units"], "offsets": [189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [185]}}], "document": ["we", "show", "that", 
"unsupervised", "sequence", "-", "segmentation", "performance", "can", "be", "transferred", "to", "extremely", "low", "-", "resource", "languages", "by", "pre", "-", "training", "a", "masked", "segmental", "language", "model", "(", "downey", "et", "al", ".", ",", "2021", ")", "multilingually", ".", "further", ",", "we", "show", "that", "this", "transfer", "can", "be", "achieved", "by", "training", "over", "a", "collection", "of", "low", "-", "resource", "languages", "that", "are", "typologically", "similar", "(", "but", "phylogenetically", "unrelated", ")", "to", "the", "target", "language", ".", "in", "our", "experiments", ",", "we", "transfer", "from", "a", "collection", "of", "10", "indigenous", "american", "languages", "(", "americasnlp", ",", "mager", "et", "al", ".", ",", "2021", ")", "to", "k", "\u2019", "iche", "\u2019", ",", "a", "mayan", "language", ".", "we", "compare", "our", "multilingual", "model", "to", "a", "monolingual", "(", "from", "-", "scratch", ")", "baseline", ",", "as", "well", "as", "a", "model", "pre", "-", "trained", "on", "quechua", "only", ".", "we", "show", "that", "the", "multilingual", "pre", "-", "trained", "approach", "yields", "consistent", "segmentation", "quality", "across", "target", "dataset", "sizes", ",", "exceeding", "the", "monolingual", "baseline", "in", "6", "/", "10", "experimental", "settings", ".", "our", "model", "yields", "especially", "strong", "results", "at", "small", "target", "sizes", ",", "including", "a", "zero", "-", "shot", "performance", "of", "20", ".", "6", "f1", ".", "these", "results", "have", "promising", "implications", "for", "low", "-", "resource", "nlp", "pipelines", "involving", "human", "-", "like", "linguistic", "units", ",", "such", "as", "the", "sparse", "transcription", "framework", "proposed", "by", "bird", "(", "2020", ")", "."]}, {"venue": "ACL", "title": "The Paradigm Discovery Problem", "abstract": "This work treats the paradigm discovery problem (PDP), the task of learning an 
inflectional morphological system from unannotated sentences. We formalize the PDP and develop evaluation metrics for judging systems. Using currently available resources, we construct datasets for the task. We also devise a heuristic benchmark for the PDP and report empirical results on five diverse languages. Our benchmark system first makes use of word embeddings and string similarity to cluster forms by cell and by paradigm. Then, we bootstrap a neural transducer on top of the clustered data to predict words to realize the empty paradigm slots. An error analysis of our system suggests clustering by cell across different inflection classes is the most pressing challenge for future work.", "doc_id": "3b57c5038a471d6113047d7db39d1ee1", "publication_year": 2020, "sentences": ["this work treats the paradigm discovery problem ( pdp ) , the task of learning an inflectional morphological system from unannotated sentences .", "we formalize the pdp and develop evaluation metrics for judging systems .", "using currently available resources , we construct datasets for the task .", "we also devise a heuristic benchmark for the pdp and report empirical results on five diverse languages .", "our benchmark system first makes use of word embeddings and string similarity to cluster forms by cell and by paradigm .", "then , we bootstrap a neural transducer on top of the clustered data to predict words to realize the empty paradigm slots .", "an error analysis of our system suggests clustering by cell across different inflection classes is the most pressing challenge for future work ."], "events": [{"event_type": "ITT", "arguments": [{"text": "paradigm discovery problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["paradigm", "discovery", "problem"], "offsets": [4, 5, 6]}], "trigger": {"text": "treats", "tokens": ["treats"], "offsets": [2]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], 
"offsets": [23]}, {"text": "paradigm discovery problem", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["paradigm", "discovery", "problem"], "offsets": [4, 5, 6]}], "trigger": {"text": "formalize", "tokens": ["formalize"], "offsets": [24]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [23]}, {"text": "evaluation metrics", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["evaluation", "metrics"], "offsets": [29, 30]}, {"text": "judging systems", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["judging", "systems"], "offsets": [32, 33]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [28]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [40]}, {"text": "datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["datasets"], "offsets": [42]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [41]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [47]}, {"text": "heuristic benchmark", "nugget_type": "APP", "argument_type": "Content", "tokens": ["heuristic", "benchmark"], "offsets": [51, 52]}, {"text": "paradigm discovery problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["paradigm", "discovery", "problem"], "offsets": [4, 5, 6]}], "trigger": {"text": "devise", "tokens": ["devise"], "offsets": [49]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [47]}, {"text": "empirical results on five diverse languages", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["empirical", "results", "on", "five", "diverse", "languages"], "offsets": [58, 59, 60, 61, 62, 63]}], "trigger": {"text": "report", "tokens": ["report"], "offsets": 
[57]}}, {"event_type": "MDS", "arguments": [{"text": "cell", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cell"], "offsets": [81]}, {"text": "paradigm", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["paradigm"], "offsets": [84]}, {"text": "word embeddings", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["word", "embeddings"], "offsets": [72, 73]}, {"text": "string similarity", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["string", "similarity"], "offsets": [75, 76]}, {"text": "cluster", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["cluster"], "offsets": [78]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [69]}}, {"event_type": "PUR", "arguments": [{"text": "forms", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["forms"], "offsets": [79]}], "trigger": {"text": "cluster", "tokens": ["cluster"], "offsets": [78]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [88]}, {"text": "neural transducer on top of the clustered data", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["neural", "transducer", "on", "top", "of", "the", "clustered", "data"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [100]}], "trigger": {"text": "bootstrap", "tokens": ["bootstrap"], "offsets": [89]}}, {"event_type": "PUR", "arguments": [{"text": "words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["words"], "offsets": [101]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [100]}}, {"event_type": "FIN", "arguments": [{"text": "challenge", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["challenge"], "offsets": [127]}], "trigger": {"text": "suggests", "tokens": ["suggests"], "offsets": [115]}}, {"event_type": "FAC", 
"arguments": [{"text": "clustering by cell across different inflection classes", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["clustering", "by", "cell", "across", "different", "inflection", "classes"], "offsets": [116, 117, 118, 119, 120, 121, 122]}], "trigger": {"text": "challenge", "tokens": ["challenge"], "offsets": [127]}}], "document": ["this", "work", "treats", "the", "paradigm", "discovery", "problem", "(", "pdp", ")", ",", "the", "task", "of", "learning", "an", "inflectional", "morphological", "system", "from", "unannotated", "sentences", ".", "we", "formalize", "the", "pdp", "and", "develop", "evaluation", "metrics", "for", "judging", "systems", ".", "using", "currently", "available", "resources", ",", "we", "construct", "datasets", "for", "the", "task", ".", "we", "also", "devise", "a", "heuristic", "benchmark", "for", "the", "pdp", "and", "report", "empirical", "results", "on", "five", "diverse", "languages", ".", "our", "benchmark", "system", "first", "makes", "use", "of", "word", "embeddings", "and", "string", "similarity", "to", "cluster", "forms", "by", "cell", "and", "by", "paradigm", ".", "then", ",", "we", "bootstrap", "a", "neural", "transducer", "on", "top", "of", "the", "clustered", "data", "to", "predict", "words", "to", "realize", "the", "empty", "paradigm", "slots", ".", "an", "error", "analysis", "of", "our", "system", "suggests", "clustering", "by", "cell", "across", "different", "inflection", "classes", "is", "the", "most", "pressing", "challenge", "for", "future", "work", "."]}, {"venue": "ACL", "title": "Effective Inter-Clause Modeling for End-to-End Emotion-Cause Pair Extraction", "abstract": "Emotion-cause pair extraction aims to extract all emotion clauses coupled with their cause clauses from a given document. Previous work employs two-step approaches, in which the first step extracts emotion clauses and cause clauses separately, and the second step trains a classifier to filter out negative pairs. 
However, such pipeline-style system for emotion-cause pair extraction is suboptimal because it suffers from error propagation and the two steps may not adapt to each other well. In this paper, we tackle emotion-cause pair extraction from a ranking perspective, i.e., ranking clause pair candidates in a document, and propose a one-step neural approach which emphasizes inter-clause modeling to perform end-to-end extraction. It models the interrelations between the clauses in a document to learn clause representations with graph attention, and enhances clause pair representations with kernel-based relative position embedding for effective ranking. Experimental results show that our approach significantly outperforms the current two-step systems, especially in the condition of extracting multiple pairs in one document.", "doc_id": "b43a06584df70af2bbf7338d8a878ef9", "publication_year": 2020, "sentences": ["emotion - cause pair extraction aims to extract all emotion clauses coupled with their cause clauses from a given document .", "previous work employs two - step approaches , in which the first step extracts emotion clauses and cause clauses separately , and the second step trains a classifier to filter out negative pairs .", "however , such pipeline - style system for emotion - cause pair extraction is suboptimal because it suffers from error propagation and the two steps may not adapt to each other well .", "in this paper , we tackle emotion - cause pair extraction from a ranking perspective , i . e . 
, ranking clause pair candidates in a document , and propose a one - step neural approach which emphasizes inter - clause modeling to perform end - to - end extraction .", "it models the interrelations between the clauses in a document to learn clause representations with graph attention , and enhances clause pair representations with kernel - based relative position embedding for effective ranking .", "experimental results show that our approach significantly outperforms the current two - step systems , especially in the condition of extracting multiple pairs in one document ."], "events": [{"event_type": "ITT", "arguments": [{"text": "emotion - cause pair extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["emotion", "-", "cause", "pair", "extraction"], "offsets": [0, 1, 2, 3, 4]}], "trigger": {"text": "extract", "tokens": ["extract"], "offsets": [7]}}, {"event_type": "RWS", "arguments": [{"text": "two - step approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "-", "step", "approaches"], "offsets": [24, 25, 26, 27]}, {"text": "emotion clauses", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["emotion", "clauses"], "offsets": [35, 36]}, {"text": "cause clauses", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["cause", "clauses"], "offsets": [38, 39]}, {"text": "separately", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["separately"], "offsets": [40]}], "trigger": {"text": "extracts", "tokens": ["extracts"], "offsets": [34]}}, {"event_type": "RWS", "arguments": [{"text": "two - step approaches", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["two", "-", "step", "approaches"], "offsets": [24, 25, 26, 27]}, {"text": "filter out", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["filter", "out"], "offsets": [50, 51]}, {"text": "classifier", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["classifier"], "offsets": 
[48]}], "trigger": {"text": "trains", "tokens": ["trains"], "offsets": [46]}}, {"event_type": "PUR", "arguments": [{"text": "negative pairs", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["negative", "pairs"], "offsets": [52, 53]}], "trigger": {"text": "filter out", "tokens": ["filter", "out"], "offsets": [50, 51]}}, {"event_type": "RWF", "arguments": [{"text": "suboptimal", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suboptimal"], "offsets": [69]}, {"text": "pipeline - style system", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["pipeline", "-", "style", "system"], "offsets": [58, 59, 60, 61]}], "trigger": {"text": "suboptimal", "tokens": ["suboptimal"], "offsets": [69]}}, {"event_type": "RWF", "arguments": [{"text": "suffers", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["suffers"], "offsets": [72]}, {"text": "error propagation", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["error", "propagation"], "offsets": [74, 75]}], "trigger": {"text": "suffers", "tokens": ["suffers"], "offsets": [72]}}, {"event_type": "RWF", "arguments": [{"text": "two steps", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["two", "steps"], "offsets": [78, 79]}], "trigger": {"text": "not adapt to each other well", "tokens": ["not", "adapt", "to", "each", "other", "well"], "offsets": [81, 82, 83, 84, 85, 86]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [92]}, {"text": "from a ranking perspective", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "a", "ranking", "perspective"], "offsets": [99, 100, 101, 102]}, {"text": "emotion - cause pair extraction", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["emotion", "-", "cause", "pair", "extraction"], "offsets": [94, 95, 96, 97, 98]}], "trigger": {"text": "tackle", "tokens": ["tackle"], "offsets": [93]}}, {"event_type": "PRP", "arguments": 
[{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [92]}, {"text": "one - step neural approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["one", "-", "step", "neural", "approach"], "offsets": [120, 121, 122, 123, 124]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [132]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [118]}}, {"event_type": "PUR", "arguments": [{"text": "end - to - end extraction", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["end", "-", "to", "-", "end", "extraction"], "offsets": [133, 134, 135, 136, 137, 138]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [132]}}, {"event_type": "WKS", "arguments": [{"text": "inter - clause modeling", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["inter", "-", "clause", "modeling"], "offsets": [127, 128, 129, 130]}, {"text": "perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["perform"], "offsets": [132]}], "trigger": {"text": "emphasizes", "tokens": ["emphasizes"], "offsets": [126]}}, {"event_type": "MDS", "arguments": [{"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [151]}, {"text": "interrelations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["interrelations"], "offsets": [143]}, {"text": "clauses", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["clauses"], "offsets": [146]}], "trigger": {"text": "models", "tokens": ["models"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "clause representations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["clause", "representations"], "offsets": [152, 153]}, {"text": "with graph attention", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "graph", "attention"], "offsets": [154, 155, 156]}], "trigger": {"text": 
"learn", "tokens": ["learn"], "offsets": [151]}}, {"event_type": "MDS", "arguments": [{"text": "clause pair representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["clause", "pair", "representations"], "offsets": [160, 161, 162]}, {"text": "kernel - based relative position embedding", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["kernel", "-", "based", "relative", "position", "embedding"], "offsets": [164, 165, 166, 167, 168, 169]}, {"text": "effective ranking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["effective", "ranking"], "offsets": [171, 172]}], "trigger": {"text": "enhances", "tokens": ["enhances"], "offsets": [159]}}, {"event_type": "FIN", "arguments": [{"text": "significantly outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significantly", "outperforms"], "offsets": [180, 181]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [176]}}, {"event_type": "CMP", "arguments": [{"text": "one - step neural approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["one", "-", "step", "neural", "approach"], "offsets": [120, 121, 122, 123, 124]}, {"text": "current two - step systems", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "two", "-", "step", "systems"], "offsets": [183, 184, 185, 186, 187]}], "trigger": {"text": "significantly outperforms", "tokens": ["significantly", "outperforms"], "offsets": [180, 181]}}], "document": ["emotion", "-", "cause", "pair", "extraction", "aims", "to", "extract", "all", "emotion", "clauses", "coupled", "with", "their", "cause", "clauses", "from", "a", "given", "document", ".", "previous", "work", "employs", "two", "-", "step", "approaches", ",", "in", "which", "the", "first", "step", "extracts", "emotion", "clauses", "and", "cause", "clauses", "separately", ",", "and", "the", "second", "step", "trains", "a", "classifier", "to", "filter", "out", "negative", "pairs", ".", "however", ",", 
"such", "pipeline", "-", "style", "system", "for", "emotion", "-", "cause", "pair", "extraction", "is", "suboptimal", "because", "it", "suffers", "from", "error", "propagation", "and", "the", "two", "steps", "may", "not", "adapt", "to", "each", "other", "well", ".", "in", "this", "paper", ",", "we", "tackle", "emotion", "-", "cause", "pair", "extraction", "from", "a", "ranking", "perspective", ",", "i", ".", "e", ".", ",", "ranking", "clause", "pair", "candidates", "in", "a", "document", ",", "and", "propose", "a", "one", "-", "step", "neural", "approach", "which", "emphasizes", "inter", "-", "clause", "modeling", "to", "perform", "end", "-", "to", "-", "end", "extraction", ".", "it", "models", "the", "interrelations", "between", "the", "clauses", "in", "a", "document", "to", "learn", "clause", "representations", "with", "graph", "attention", ",", "and", "enhances", "clause", "pair", "representations", "with", "kernel", "-", "based", "relative", "position", "embedding", "for", "effective", "ranking", ".", "experimental", "results", "show", "that", "our", "approach", "significantly", "outperforms", "the", "current", "two", "-", "step", "systems", ",", "especially", "in", "the", "condition", "of", "extracting", "multiple", "pairs", "in", "one", "document", "."]}, {"venue": "ACL", "title": "Few-Shot Class-Incremental Learning for Named Entity Recognition", "abstract": "Previous work of class-incremental learning for Named Entity Recognition (NER) relies on the assumption that there exists abundance of labeled data for the training of new classes. In this work, we study a more challenging but practical problem, i.e., few-shot class-incremental learning for NER, where an NER model is trained with only few labeled samples of the new classes, without forgetting knowledge of the old ones. 
To alleviate the problem of catastrophic forgetting in few-shot class-incremental learning, we reconstruct synthetic training data of the old classes using the trained NER model, augmenting the training of new classes. We further develop a framework that distills from the existing model with both synthetic data, and real data from the current training set. Experimental results show that our approach achieves significant improvements over existing baselines.", "doc_id": "427ab6f50467a222122817d597db83a8", "publication_year": 2022, "sentences": ["previous work of class - incremental learning for named entity recognition ( ner ) relies on the assumption that there exists abundance of labeled data for the training of new classes .", "in this work , we study a more challenging but practical problem , i . e . , few - shot class - incremental learning for ner , where an ner model is trained with only few labeled samples of the new classes , without forgetting knowledge of the old ones .", "to alleviate the problem of catastrophic forgetting in few - shot class - incremental learning , we reconstruct synthetic training data of the old classes using the trained ner model , augmenting the training of new classes .", "we further develop a framework that distills from the existing model with both synthetic data , and real data from the current training set .", "experimental results show that our approach achieves significant improvements over existing baselines ."], "events": [{"event_type": "ITT", "arguments": [{"text": "named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "entity", "recognition"], "offsets": [8, 9, 10]}], "trigger": {"text": "relies", "tokens": ["relies"], "offsets": [14]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [36]}, {"text": "few - shot class - incremental learning", "nugget_type": "APP", "argument_type": "Content", 
"tokens": ["few", "-", "shot", "class", "-", "incremental", "learning"], "offsets": [50, 51, 52, 53, 54, 55, 56]}, {"text": "ner", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["named", "entity", "recognition"], "offsets": [8, 9, 10]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [37]}}, {"event_type": "RWS", "arguments": [{"text": "ner model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["ner", "model"], "offsets": [62, 63]}, {"text": "few labeled samples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["few", "labeled", "samples"], "offsets": [68, 69, 70]}, {"text": "new classes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["new", "classes"], "offsets": [73, 74]}, {"text": "without forgetting knowledge of the old ones", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "forgetting", "knowledge", "of", "the", "old", "classes"], "offsets": [76, 77, 78, 79, 80, 81, 74]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [65]}}, {"event_type": "RWF", "arguments": [{"text": "few - shot class - incremental learning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["few", "-", "shot", "class", "-", "incremental", "learning"], "offsets": [92, 93, 94, 95, 96, 97, 98]}], "trigger": {"text": "forgetting", "tokens": ["forgetting"], "offsets": [90]}}, {"event_type": "MDS", "arguments": [{"text": "trained ner model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["trained", "named", "entity", "recognition", "model"], "offsets": [111, 8, 9, 10, 113]}, {"text": "synthetic training data", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["synthetic", "training", "data"], "offsets": [102, 103, 104]}, {"text": "old classes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["old", "classes"], "offsets": [107, 108]}, {"text": "augmenting", "nugget_type": "E-PUR", "argument_type": 
"Target", "tokens": ["augmenting"], "offsets": [115]}], "trigger": {"text": "reconstruct", "tokens": ["reconstruct"], "offsets": [101]}}, {"event_type": "PUR", "arguments": [{"text": "training of new classes", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["training", "of", "new", "classes"], "offsets": [117, 118, 119, 120]}], "trigger": {"text": "augmenting", "tokens": ["augmenting"], "offsets": [115]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [122]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework"], "offsets": [126]}, {"text": "distills", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["distills"], "offsets": [128]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [124]}}, {"event_type": "PUR", "arguments": [{"text": "synthetic data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["synthetic", "data"], "offsets": [135, 136]}, {"text": "real data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["real", "data"], "offsets": [139, 140]}, {"text": "from the current training set", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "current", "training", "set"], "offsets": [141, 142, 143, 144, 145]}, {"text": "from the existing model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "existing", "model"], "offsets": [129, 130, 131, 132]}], "trigger": {"text": "distills", "tokens": ["distills"], "offsets": [128]}}, {"event_type": "CMP", "arguments": [{"text": "significant improvements", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["significant", "improvements"], "offsets": [154, 155]}, {"text": "existing baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "baselines"], "offsets": [157, 158]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [153]}}], 
"document": ["previous", "work", "of", "class", "-", "incremental", "learning", "for", "named", "entity", "recognition", "(", "ner", ")", "relies", "on", "the", "assumption", "that", "there", "exists", "abundance", "of", "labeled", "data", "for", "the", "training", "of", "new", "classes", ".", "in", "this", "work", ",", "we", "study", "a", "more", "challenging", "but", "practical", "problem", ",", "i", ".", "e", ".", ",", "few", "-", "shot", "class", "-", "incremental", "learning", "for", "ner", ",", "where", "an", "ner", "model", "is", "trained", "with", "only", "few", "labeled", "samples", "of", "the", "new", "classes", ",", "without", "forgetting", "knowledge", "of", "the", "old", "ones", ".", "to", "alleviate", "the", "problem", "of", "catastrophic", "forgetting", "in", "few", "-", "shot", "class", "-", "incremental", "learning", ",", "we", "reconstruct", "synthetic", "training", "data", "of", "the", "old", "classes", "using", "the", "trained", "ner", "model", ",", "augmenting", "the", "training", "of", "new", "classes", ".", "we", "further", "develop", "a", "framework", "that", "distills", "from", "the", "existing", "model", "with", "both", "synthetic", "data", ",", "and", "real", "data", "from", "the", "current", "training", "set", ".", "experimental", "results", "show", "that", "our", "approach", "achieves", "significant", "improvements", "over", "existing", "baselines", "."]}, {"venue": "ACL", "title": "BLEURT: Learning Robust Metrics for Text Generation", "abstract": "Text generation has made significant advances in the last few years. Yet, evaluation metrics have lagged behind, as the most popular choices (e.g., BLEU and ROUGE) may correlate poorly with human judgment. We propose BLEURT, a learned evaluation metric for English based on BERT. BLEURT can model human judgment with a few thousand possibly biased training examples. A key aspect of our approach is a novel pre-training scheme that uses millions of synthetic examples to help the model generalize. 
BLEURT provides state-of-the-art results on the last three years of the WMT Metrics shared task and the WebNLG data set. In contrast to a vanilla BERT-based approach, it yields superior results even when the training data is scarce and out-of-distribution.", "doc_id": "f38d277a2c8b1435c6ad9b0d26a89de8", "publication_year": 2020, "sentences": ["text generation has made significant advances in the last few years .", "yet , evaluation metrics have lagged behind , as the most popular choices ( e . g . , bleu and rouge ) may correlate poorly with human judgment .", "we propose bleurt , a learned evaluation metric for english based on bert .", "bleurt can model human judgment with a few thousand possibly biased training examples .", "a key aspect of our approach is a novel pre - training scheme that uses millions of synthetic examples to help the model generalize .", "bleurt provides state - of - the - art results on the last three years of the wmt metrics shared task and the webnlg data set .", "in contrast to a vanilla bert - based approach , it yields superior results even when the training data is scarce and out - of - distribution ."], "events": [{"event_type": "ITT", "arguments": [{"text": "text generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["text", "generation"], "offsets": [0, 1]}], "trigger": {"text": "made", "tokens": ["made"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "lagged behind", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lagged", "behind"], "offsets": [17, 18]}], "trigger": {"text": "lagged behind", "tokens": ["lagged", "behind"], "offsets": [17, 18]}}, {"event_type": "RWF", "arguments": [{"text": "most popular choices", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["most", "popular", "choices"], "offsets": [22, 23, 24]}, {"text": "poorly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poorly"], "offsets": [37]}], "trigger": {"text": "correlate", "tokens": 
["correlate"], "offsets": [36]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [42]}, {"text": "learned evaluation metric for english", "nugget_type": "APP", "argument_type": "Content", "tokens": ["learned", "evaluation", "metric", "for", "english"], "offsets": [47, 48, 49, 50, 51]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [43]}}, {"event_type": "MDS", "arguments": [{"text": "human judgment", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["human", "judgment"], "offsets": [59, 60]}, {"text": "biased training examples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["biased", "training", "examples"], "offsets": [66, 67, 68]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "pre - training scheme", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pre", "-", "training", "scheme"], "offsets": [79, 80, 81, 82]}, {"text": "synthetic examples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["synthetic", "examples"], "offsets": [87, 88]}, {"text": "help", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["help"], "offsets": [90]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [84]}}, {"event_type": "PUR", "arguments": [{"text": "model generalize", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["model", "generalize"], "offsets": [92, 93]}], "trigger": {"text": "help", "tokens": ["help"], "offsets": [90]}}, {"event_type": "FAC", "arguments": [{"text": "bleurt", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["bleurt"], "offsets": [95]}, {"text": "wmt metrics shared task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["wmt", "metrics", "shared", "task"], "offsets": [112, 113, 114, 115]}, {"text": "state - of - the - art results", "nugget_type": "STR", 
"argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [97, 98, 99, 100, 101, 102, 103, 104]}, {"text": "webnlg data set", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["webnlg", "data", "set"], "offsets": [118, 119, 120]}], "trigger": {"text": "provides", "tokens": ["provides"], "offsets": [96]}}, {"event_type": "CMP", "arguments": [{"text": "vanilla bert - based approach", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["vanilla", "bert", "-", "based", "approach"], "offsets": [126, 127, 128, 129, 130]}, {"text": "superior results", "nugget_type": "STR", "argument_type": "Result", "tokens": ["superior", "results"], "offsets": [134, 135]}, {"text": "when the training data is scarce and out - of - distribution", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "the", "training", "data", "is", "scarce", "and", "out", "-", "of", "-", "distribution"], "offsets": [137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148]}, {"text": "bleurt", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["bleurt"], "offsets": [95]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [133]}}], "document": ["text", "generation", "has", "made", "significant", "advances", "in", "the", "last", "few", "years", ".", "yet", ",", "evaluation", "metrics", "have", "lagged", "behind", ",", "as", "the", "most", "popular", "choices", "(", "e", ".", "g", ".", ",", "bleu", "and", "rouge", ")", "may", "correlate", "poorly", "with", "human", "judgment", ".", "we", "propose", "bleurt", ",", "a", "learned", "evaluation", "metric", "for", "english", "based", "on", "bert", ".", "bleurt", "can", "model", "human", "judgment", "with", "a", "few", "thousand", "possibly", "biased", "training", "examples", ".", "a", "key", "aspect", "of", "our", "approach", "is", "a", "novel", "pre", "-", "training", "scheme", "that", "uses", "millions", "of", "synthetic", "examples", "to", "help", "the", 
"model", "generalize", ".", "bleurt", "provides", "state", "-", "of", "-", "the", "-", "art", "results", "on", "the", "last", "three", "years", "of", "the", "wmt", "metrics", "shared", "task", "and", "the", "webnlg", "data", "set", ".", "in", "contrast", "to", "a", "vanilla", "bert", "-", "based", "approach", ",", "it", "yields", "superior", "results", "even", "when", "the", "training", "data", "is", "scarce", "and", "out", "-", "of", "-", "distribution", "."]}, {"venue": "ACL", "title": "A Variational Hierarchical Model for Neural Cross-Lingual Summarization", "abstract": "The goal of the cross-lingual summarization (CLS) is to convert a document in one language (e.g., English) to a summary in another one (e.g., Chinese). The CLS task is essentially the combination of machine translation (MT) and monolingual summarization (MS), and thus there exists the hierarchical relationship between MT&MS and CLS. Existing studies on CLS mainly focus on utilizing pipeline methods or jointly training an end-to-end model through an auxiliary MT or MS objective. However, it is very challenging for the model to directly conduct CLS as it requires both the abilities to translate and summarize. To address this issue, we propose a hierarchical model for the CLS task, based on the conditional variational auto-encoder. The hierarchical model contains two kinds of latent variables at the local and global levels, respectively. At the local level, there are two latent variables, one for translation and the other for summarization. As for the global level, there is another latent variable for cross-lingual summarization conditioned on the two local-level variables. Experiments on two language directions (English-Chinese) verify the effectiveness and superiority of the proposed approach. 
In addition, we show that our model is able to generate better cross-lingual summaries than comparison models in the few-shot setting.", "doc_id": "96d0dba9ad4247ad01897d38b20aa1d1", "publication_year": 2022, "sentences": ["the goal of the cross - lingual summarization ( cls ) is to convert a document in one language ( e . g . , english ) to a summary in another one ( e . g . , chinese ) .", "the cls task is essentially the combination of machine translation ( mt ) and monolingual summarization ( ms ) , and thus there exists the hierarchical relationship between mt & ms and cls .", "existing studies on cls mainly focus on utilizing pipeline methods or jointly training an end - to - end model through an auxiliary mt or ms objective .", "however , it is very challenging for the model to directly conduct cls as it requires both the abilities to translate and summarize .", "to address this issue , we propose a hierarchical model for the cls task , based on the conditional variational auto - encoder .", "the hierarchical model contains two kinds of latent variables at the local and global levels , respectively .", "at the local level , there are two latent variables , one for translation and the other for summarization .", "as for the global level , there is another latent variable for cross - lingual summarization conditioned on the two local - level variables .", "experiments on two language directions ( english - chinese ) verify the effectiveness and superiority of the proposed approach .", "in addition , we show that our model is able to generate better cross - lingual summaries than comparison models in the few - shot setting ."], "events": [{"event_type": "ITT", "arguments": [{"text": "cross - lingual summarization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["cross", "-", "lingual", "summarization"], "offsets": [4, 5, 6, 7]}], "trigger": {"text": "goal", "tokens": ["goal"], "offsets": [1]}}, {"event_type": "RWF", "arguments": [{"text": "directly 
conduct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["directly", "conduct"], "offsets": [114, 115]}, {"text": "model", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["model"], "offsets": [112]}], "trigger": {"text": "challenging", "tokens": ["challenging"], "offsets": [109]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [133]}, {"text": "hierarchical model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchical", "model"], "offsets": [136, 137]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "on two language directions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "two", "language", "directions"], "offsets": [216, 217, 218, 219]}, {"text": "effectiveness and superiority", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "and", "superiority"], "offsets": [227, 228, 229]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [225]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [238]}, {"text": "generate", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generate"], "offsets": [246]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [239]}}, {"event_type": "CMP", "arguments": [{"text": "in the few - shot setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "few", "-", "shot", "setting"], "offsets": [255, 256, 257, 258, 259, 260]}, {"text": "comparison models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["comparison", "models"], "offsets": [253, 254]}, {"text": "hierarchical model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hierarchical", "model"], "offsets": [136, 137]}, {"text": "better cross - lingual summaries", 
"nugget_type": "STR", "argument_type": "Result", "tokens": ["better", "cross", "-", "lingual", "summaries"], "offsets": [247, 248, 249, 250, 251]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [246]}}, {"event_type": "PUR", "arguments": [{"text": "cross - lingual summarization", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["cross", "-", "lingual", "summarization"], "offsets": [4, 5, 6, 7]}], "trigger": {"text": "directly conduct", "tokens": ["directly", "conduct"], "offsets": [114, 115]}}], "document": ["the", "goal", "of", "the", "cross", "-", "lingual", "summarization", "(", "cls", ")", "is", "to", "convert", "a", "document", "in", "one", "language", "(", "e", ".", "g", ".", ",", "english", ")", "to", "a", "summary", "in", "another", "one", "(", "e", ".", "g", ".", ",", "chinese", ")", ".", "the", "cls", "task", "is", "essentially", "the", "combination", "of", "machine", "translation", "(", "mt", ")", "and", "monolingual", "summarization", "(", "ms", ")", ",", "and", "thus", "there", "exists", "the", "hierarchical", "relationship", "between", "mt", "&", "ms", "and", "cls", ".", "existing", "studies", "on", "cls", "mainly", "focus", "on", "utilizing", "pipeline", "methods", "or", "jointly", "training", "an", "end", "-", "to", "-", "end", "model", "through", "an", "auxiliary", "mt", "or", "ms", "objective", ".", "however", ",", "it", "is", "very", "challenging", "for", "the", "model", "to", "directly", "conduct", "cls", "as", "it", "requires", "both", "the", "abilities", "to", "translate", "and", "summarize", ".", "to", "address", "this", "issue", ",", "we", "propose", "a", "hierarchical", "model", "for", "the", "cls", "task", ",", "based", "on", "the", "conditional", "variational", "auto", "-", "encoder", ".", "the", "hierarchical", "model", "contains", "two", "kinds", "of", "latent", "variables", "at", "the", "local", "and", "global", "levels", ",", "respectively", ".", "at", "the", "local", "level", ",", "there", "are", "two", 
"latent", "variables", ",", "one", "for", "translation", "and", "the", "other", "for", "summarization", ".", "as", "for", "the", "global", "level", ",", "there", "is", "another", "latent", "variable", "for", "cross", "-", "lingual", "summarization", "conditioned", "on", "the", "two", "local", "-", "level", "variables", ".", "experiments", "on", "two", "language", "directions", "(", "english", "-", "chinese", ")", "verify", "the", "effectiveness", "and", "superiority", "of", "the", "proposed", "approach", ".", "in", "addition", ",", "we", "show", "that", "our", "model", "is", "able", "to", "generate", "better", "cross", "-", "lingual", "summaries", "than", "comparison", "models", "in", "the", "few", "-", "shot", "setting", "."]}, {"venue": "ACL", "title": "Improving Image Captioning with Better Use of Caption", "abstract": "Image captioning is a multimodal problem that has drawn extensive attention in both the natural language processing and computer vision community. In this paper, we present a novel image captioning architecture to better explore semantics available in captions and leverage that to enhance both image representation and caption generation. Our models first construct caption-guided visual relationship graphs that introduce beneficial inductive bias using weakly supervised multi-instance learning. The representation is then enhanced with neighbouring and contextual nodes with their textual and visual features. During generation, the model further incorporates visual relationships using multi-task learning for jointly predicting word and object/predicate tag sequences. We perform extensive experiments on the MSCOCO dataset, showing that the proposed framework significantly outperforms the baselines, resulting in the state-of-the-art performance under a wide range of evaluation metrics. 
The code of our paper has been made publicly available.", "doc_id": "3d325af2cef1f9c25761ae0051531e90", "publication_year": 2020, "sentences": ["image captioning is a multimodal problem that has drawn extensive attention in both the natural language processing and computer vision community .", "in this paper , we present a novel image captioning architecture to better explore semantics available in captions and leverage that to enhance both image representation and caption generation .", "our models first construct caption - guided visual relationship graphs that introduce beneficial inductive bias using weakly supervised multi - instance learning .", "the representation is then enhanced with neighbouring and contextual nodes with their textual and visual features .", "during generation , the model further incorporates visual relationships using multi - task learning for jointly predicting word and object / predicate tag sequences .", "we perform extensive experiments on the mscoco dataset , showing that the proposed framework significantly outperforms the baselines , resulting in the state - of - the - art performance under a wide range of evaluation metrics .", "the code of our paper has been made publicly available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "image captioning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["image", "captioning"], "offsets": [0, 1]}], "trigger": {"text": "problem", "tokens": ["problem"], "offsets": [5]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [26]}, {"text": "image captioning architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["image", "captioning", "architecture"], "offsets": [30, 31, 32]}, {"text": "better explore", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["better", "explore"], "offsets": [34, 35]}, {"text": "enhance", "nugget_type": "E-PUR", "argument_type": 
"Target", "tokens": ["enhance"], "offsets": [44]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "semantics available in captions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["semantics", "available", "in", "captions"], "offsets": [36, 37, 38, 39]}], "trigger": {"text": "better explore", "tokens": ["better", "explore"], "offsets": [34, 35]}}, {"event_type": "PUR", "arguments": [{"text": "image representation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["image", "representation"], "offsets": [46, 47]}, {"text": "caption generation", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["caption", "generation"], "offsets": [49, 50]}], "trigger": {"text": "enhance", "tokens": ["enhance"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "weakly supervised multi - instance learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["weakly", "supervised", "multi", "-", "instance", "learning"], "offsets": [68, 69, 70, 71, 72, 73]}, {"text": "caption - guided visual relationship graphs", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["caption", "-", "guided", "visual", "relationship", "graphs"], "offsets": [56, 57, 58, 59, 60, 61]}, {"text": "introduce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["introduce"], "offsets": [63]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [55]}}, {"event_type": "PUR", "arguments": [{"text": "beneficial inductive bias", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["beneficial", "inductive", "bias"], "offsets": [64, 65, 66]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [63]}}, {"event_type": "MDS", "arguments": [{"text": "representation", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["representation"], "offsets": [76]}, {"text": "neighbouring nodes with their textual and visual features", 
"nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["neighbouring", "nodes", "with", "their", "textual", "and", "visual", "features"], "offsets": [81, 84, 85, 86, 87, 88, 89, 90]}, {"text": "contextual nodes with their textual and visual features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["contextual", "nodes", "with", "their", "textual", "and", "visual", "features"], "offsets": [83, 84, 85, 86, 87, 88, 89, 90]}], "trigger": {"text": "enhanced", "tokens": ["enhanced"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "multi - task learning for jointly predicting word", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["multi", "-", "task", "learning", "for", "jointly", "predicting", "word"], "offsets": [102, 103, 104, 105, 106, 107, 108, 109]}, {"text": "object / predicate tag sequences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["object", "/", "predicate", "tag", "sequences"], "offsets": [111, 112, 113, 114, 115]}, {"text": "visual relationships", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["visual", "relationships"], "offsets": [99, 100]}, {"text": "during generation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "generation"], "offsets": [92, 93]}], "trigger": {"text": "incorporates", "tokens": ["incorporates"], "offsets": [98]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [132]}], "trigger": {"text": "showing", "tokens": ["showing"], "offsets": [126]}}, {"event_type": "CMP", "arguments": [{"text": "mscoco dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["mscoco", "dataset"], "offsets": [123, 124]}, {"text": "image captioning architecture", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["image", "captioning", "architecture"], "offsets": [30, 31, 32]}, {"text": "significantly", 
"nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [131]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [132]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [132]}}, {"event_type": "FAC", "arguments": [{"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [139, 140, 141, 142, 143, 144, 145, 146]}, {"text": "image captioning architecture", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["image", "captioning", "architecture"], "offsets": [30, 31, 32]}, {"text": "wide range of evaluation metrics", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["wide", "range", "of", "evaluation", "metrics"], "offsets": [149, 150, 151, 152, 153]}], "trigger": {"text": "resulting", "tokens": ["resulting"], "offsets": [136]}}], "document": ["image", "captioning", "is", "a", "multimodal", "problem", "that", "has", "drawn", "extensive", "attention", "in", "both", "the", "natural", "language", "processing", "and", "computer", "vision", "community", ".", "in", "this", "paper", ",", "we", "present", "a", "novel", "image", "captioning", "architecture", "to", "better", "explore", "semantics", "available", "in", "captions", "and", "leverage", "that", "to", "enhance", "both", "image", "representation", "and", "caption", "generation", ".", "our", "models", "first", "construct", "caption", "-", "guided", "visual", "relationship", "graphs", "that", "introduce", "beneficial", "inductive", "bias", "using", "weakly", "supervised", "multi", "-", "instance", "learning", ".", "the", "representation", "is", "then", "enhanced", "with", "neighbouring", "and", "contextual", "nodes", "with", "their", "textual", "and", "visual", "features", ".", "during", "generation", ",", "the", "model", "further", "incorporates", "visual", 
"relationships", "using", "multi", "-", "task", "learning", "for", "jointly", "predicting", "word", "and", "object", "/", "predicate", "tag", "sequences", ".", "we", "perform", "extensive", "experiments", "on", "the", "mscoco", "dataset", ",", "showing", "that", "the", "proposed", "framework", "significantly", "outperforms", "the", "baselines", ",", "resulting", "in", "the", "state", "-", "of", "-", "the", "-", "art", "performance", "under", "a", "wide", "range", "of", "evaluation", "metrics", ".", "the", "code", "of", "our", "paper", "has", "been", "made", "publicly", "available", "."]}, {"venue": "ACL", "title": "KM-BART: Knowledge Enhanced Multimodal BART for Visual Commonsense Generation", "abstract": "We present Knowledge Enhanced Multimodal BART (KM-BART), which is a Transformer-based sequence-to-sequence model capable of reasoning about commonsense knowledge from multimodal inputs of images and texts. We adapt the generative BART architecture (Lewis et al., 2020) to a multimodal model with visual and textual inputs. We further develop novel pretraining tasks to improve the model performance on the Visual Commonsense Generation (VCG) task. In particular, our pretraining task of Knowledge-based Commonsense Generation (KCG) boosts model performance on the VCG task by leveraging commonsense knowledge from a large language model pretrained on external commonsense knowledge graphs. To the best of our knowledge, we are the first to propose a dedicated task for improving model performance on the VCG task. 
Experimental results show that our model reaches state-of-the-art performance on the VCG task (Park et al., 2020) by applying these novel pretraining tasks.", "doc_id": "241acce3c8b68f9679b61612fa779d63", "publication_year": 2021, "sentences": ["we present knowledge enhanced multimodal bart ( km - bart ) , which is a transformer - based sequence - to - sequence model capable of reasoning about commonsense knowledge from multimodal inputs of images and texts .", "we adapt the generative bart architecture ( lewis et al . , 2020 ) to a multimodal model with visual and textual inputs .", "we further develop novel pretraining tasks to improve the model performance on the visual commonsense generation ( vcg ) task .", "in particular , our pretraining task of knowledge - based commonsense generation ( kcg ) boosts model performance on the vcg task by leveraging commonsense knowledge from a large language model pretrained on external commonsense knowledge graphs .", "to the best of our knowledge , we are the first to propose a dedicated task for improving model performance on the vcg task .", "experimental results show that our model reaches state - of - the - art performance on the vcg task ( park et al . 
, 2020 ) by applying these novel pretraining tasks ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "knowledge enhanced multimodal bart", "nugget_type": "APP", "argument_type": "Content", "tokens": ["knowledge", "enhanced", "multimodal", "bart"], "offsets": [2, 3, 4, 5]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "MDS", "arguments": [{"text": "multimodal model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["multimodal", "model"], "offsets": [54, 55]}, {"text": "generative bart architecture", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["generative", "bart", "architecture"], "offsets": [41, 42, 43]}, {"text": "visual and textual inputs", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["visual", "and", "textual", "inputs"], "offsets": [57, 58, 59, 60]}], "trigger": {"text": "adapt", "tokens": ["adapt"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [62]}, {"text": "pretraining tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["pretraining", "tasks"], "offsets": [66, 67]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [69]}], "trigger": {"text": "further develop", "tokens": ["further", "develop"], "offsets": [63, 64]}}, {"event_type": "PUR", "arguments": [{"text": "model performance on the visual commonsense generation ( vcg ) task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["model", "performance", "on", "the", "visual", "commonsense", "generation", "task"], "offsets": [71, 72, 73, 74, 75, 76, 77, 81]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "reaches", "nugget_type": "E-FAC", 
"argument_type": "Content", "tokens": ["reaches"], "offsets": [152]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [148]}}, {"event_type": "FAC", "arguments": [{"text": "knowledge enhanced multimodal bart", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["knowledge", "enhanced", "multimodal", "bart"], "offsets": [2, 3, 4, 5]}, {"text": "state - of - the - art", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [153, 154, 155, 156, 157, 158, 159]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performance"], "offsets": [160]}], "trigger": {"text": "reaches", "tokens": ["reaches"], "offsets": [152]}}], "document": ["we", "present", "knowledge", "enhanced", "multimodal", "bart", "(", "km", "-", "bart", ")", ",", "which", "is", "a", "transformer", "-", "based", "sequence", "-", "to", "-", "sequence", "model", "capable", "of", "reasoning", "about", "commonsense", "knowledge", "from", "multimodal", "inputs", "of", "images", "and", "texts", ".", "we", "adapt", "the", "generative", "bart", "architecture", "(", "lewis", "et", "al", ".", ",", "2020", ")", "to", "a", "multimodal", "model", "with", "visual", "and", "textual", "inputs", ".", "we", "further", "develop", "novel", "pretraining", "tasks", "to", "improve", "the", "model", "performance", "on", "the", "visual", "commonsense", "generation", "(", "vcg", ")", "task", ".", "in", "particular", ",", "our", "pretraining", "task", "of", "knowledge", "-", "based", "commonsense", "generation", "(", "kcg", ")", "boosts", "model", "performance", "on", "the", "vcg", "task", "by", "leveraging", "commonsense", "knowledge", "from", "a", "large", "language", "model", "pretrained", "on", "external", "commonsense", "knowledge", "graphs", ".", "to", "the", "best", "of", "our", "knowledge", ",", "we", "are", "the", "first", "to", "propose", "a", "dedicated", "task", "for", "improving", "model", 
"performance", "on", "the", "vcg", "task", ".", "experimental", "results", "show", "that", "our", "model", "reaches", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "the", "vcg", "task", "(", "park", "et", "al", ".", ",", "2020", ")", "by", "applying", "these", "novel", "pretraining", "tasks", "."]}, {"venue": "ACL", "title": "STEMM: Self-learning with Speech-text Manifold Mixup for Speech Translation", "abstract": "How to learn a better speech representation for end-to-end speech-to-text translation (ST) with limited labeled data? Existing techniques often attempt to transfer powerful machine translation (MT) capabilities to ST, but neglect the representation discrepancy across modalities. In this paper, we propose the Speech-TExt Manifold Mixup (STEMM) method to calibrate such discrepancy. Specifically, we mix up the representation sequences of different modalities, and take both unimodal speech sequences and multimodal mixed sequences as input to the translation model in parallel, and regularize their output predictions with a self-learning framework. 
Experiments on MuST-C speech translation benchmark and further analysis show that our method effectively alleviates the cross-modal representation discrepancy, and achieves significant improvements over a strong baseline on eight translation directions.", "doc_id": "d49e4edf72832da1d1b562ab96c0a9de", "publication_year": 2022, "sentences": ["how to learn a better speech representation for end - to - end speech - to - text translation ( st ) with limited labeled data ?", "existing techniques often attempt to transfer powerful machine translation ( mt ) capabilities to st , but neglect the representation discrepancy across modalities .", "in this paper , we propose the speech - text manifold mixup ( stemm ) method to calibrate such discrepancy .", "specifically , we mix up the representation sequences of different modalities , and take both unimodal speech sequences and multimodal mixed sequences as input to the translation model in parallel , and regularize their output predictions with a self - learning framework .", "experiments on must - c speech translation benchmark and further analysis show that our method effectively alleviates the cross - modal representation discrepancy , and achieves significant improvements over a strong baseline on eight translation directions ."], "events": [{"event_type": "RWF", "arguments": [{"text": "existing techniques", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "techniques"], "offsets": [27, 28]}, {"text": "neglect", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["neglect"], "offsets": [44]}], "trigger": {"text": "neglect", "tokens": ["neglect"], "offsets": [44]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [55]}, {"text": "speech - text manifold mixup ( stemm ) method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["speech", "-", "text", "manifold", "mixup", "(", "stemm", ")", "method"], 
"offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66]}, {"text": "calibrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["calibrate"], "offsets": [68]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [56]}}, {"event_type": "PUR", "arguments": [{"text": "representation discrepancy across modalities", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["representation", "discrepancy", "across", "modalities"], "offsets": [46, 47, 48, 49]}], "trigger": {"text": "calibrate", "tokens": ["calibrate"], "offsets": [68]}}, {"event_type": "MDS", "arguments": [{"text": "representation sequences of different modalities", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["representation", "sequences", "of", "different", "modalities"], "offsets": [78, 79, 80, 81, 82]}], "trigger": {"text": "mix", "tokens": ["mix"], "offsets": [75]}}, {"event_type": "MDS", "arguments": [{"text": "unimodal speech sequences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["unimodal", "speech", "sequences"], "offsets": [87, 88, 89]}, {"text": "multimodal mixed sequences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["multimodal", "mixed", "sequences"], "offsets": [91, 92, 93]}, {"text": "input", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["input"], "offsets": [95]}, {"text": "in parallel", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "parallel"], "offsets": [100, 101]}], "trigger": {"text": "take", "tokens": ["take"], "offsets": [85]}}, {"event_type": "MDS", "arguments": [{"text": "self - learning framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["self", "-", "learning", "framework"], "offsets": [110, 111, 112, 113]}, {"text": "output predictions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["output", "predictions"], "offsets": [106, 107]}], "trigger": {"text": "regularize", "tokens": ["regularize"], "offsets": [104]}}, 
{"event_type": "FAC", "arguments": [{"text": "speech - text manifold mixup ( stemm ) method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["speech", "-", "text", "manifold", "mixup", "(", "stemm", ")", "method"], "offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66]}, {"text": "cross - modal representation discrepancy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["cross", "-", "modal", "representation", "discrepancy"], "offsets": [133, 134, 135, 136, 137]}], "trigger": {"text": "effectively alleviates", "tokens": ["effectively", "alleviates"], "offsets": [130, 131]}}, {"event_type": "CMP", "arguments": [{"text": "speech - text manifold mixup ( stemm ) method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["speech", "-", "text", "manifold", "mixup", "(", "stemm", ")", "method"], "offsets": [58, 59, 60, 61, 62, 63, 64, 65, 66]}, {"text": "significant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significant"], "offsets": [141]}, {"text": "improvements", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["improvements"], "offsets": [142]}, {"text": "strong baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baseline"], "offsets": [145, 146]}, {"text": "on eight translation directions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "eight", "translation", "directions"], "offsets": [147, 148, 149, 150]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [140]}}, {"event_type": "ITT", "arguments": [{"text": "end - to - end speech - to - text translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["end", "-", "to", "-", "end", "speech", "-", "to", "-", "text", "translation"], "offsets": [8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [2]}}], "document": ["how", "to", "learn", "a", "better", "speech", "representation", "for", "end", "-", "to", "-", "end", "speech", 
"-", "to", "-", "text", "translation", "(", "st", ")", "with", "limited", "labeled", "data", "?", "existing", "techniques", "often", "attempt", "to", "transfer", "powerful", "machine", "translation", "(", "mt", ")", "capabilities", "to", "st", ",", "but", "neglect", "the", "representation", "discrepancy", "across", "modalities", ".", "in", "this", "paper", ",", "we", "propose", "the", "speech", "-", "text", "manifold", "mixup", "(", "stemm", ")", "method", "to", "calibrate", "such", "discrepancy", ".", "specifically", ",", "we", "mix", "up", "the", "representation", "sequences", "of", "different", "modalities", ",", "and", "take", "both", "unimodal", "speech", "sequences", "and", "multimodal", "mixed", "sequences", "as", "input", "to", "the", "translation", "model", "in", "parallel", ",", "and", "regularize", "their", "output", "predictions", "with", "a", "self", "-", "learning", "framework", ".", "experiments", "on", "must", "-", "c", "speech", "translation", "benchmark", "and", "further", "analysis", "show", "that", "our", "method", "effectively", "alleviates", "the", "cross", "-", "modal", "representation", "discrepancy", ",", "and", "achieves", "significant", "improvements", "over", "a", "strong", "baseline", "on", "eight", "translation", "directions", "."]}, {"venue": "ACL", "title": "C-MORE: Pretraining to Answer Open-Domain Questions by Consulting Millions of References", "abstract": "We consider the problem of pretraining a two-stage open-domain question answering (QA) system (retriever + reader) with strong transfer capabilities. The key challenge is how to construct a large amount of high-quality question-answer-context triplets without task-specific annotations. 
Specifically, the triplets should align well with downstream tasks by: (i) covering a wide range of domains (for open-domain applications), (ii) linking a question to its semantically relevant context with supporting evidence (for training the retriever), and (iii) identifying the correct answer in the context (for training the reader). Previous pretraining approaches generally fall short of one or more of these requirements. In this work, we automatically construct a large-scale corpus that meets all three criteria by consulting millions of references cited within Wikipedia. The well-aligned pretraining signals benefit both the retriever and the reader significantly. Our pretrained retriever leads to 2%-10% absolute gains in top-20 accuracy. And with our pretrained reader, the entire system improves by up to 4% in exact match.", "doc_id": "3a38802ed5117d2032e8877bf078933f", "publication_year": 2022, "sentences": ["we consider the problem of pretraining a two - stage open - domain question answering ( qa ) system ( retriever + reader ) with strong transfer capabilities .", "the key challenge is how to construct a large amount of high - quality question - answer - context triplets without task - specific annotations .", "specifically , the triplets should align well with downstream tasks by : ( i ) covering a wide range of domains ( for open - domain applications ) , ( ii ) linking a question to its semantically relevant context with supporting evidence ( for training the retriever ) , and ( iii ) identifying the correct answer in the context ( for training the reader ) .", "previous pretraining approaches generally fall short of one or more of these requirements .", "in this work , we automatically construct a large - scale corpus that meets all three criteria by consulting millions of references cited within wikipedia .", "the well - aligned pretraining signals benefit both the retriever and the reader significantly .", "our pretrained retriever leads to 2 % - 10 
% absolute gains in top - 20 accuracy .", "and with our pretrained reader , the entire system improves by up to 4 % in exact match ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "problem of pretraining a two - stage open - domain question answering ( qa ) system", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["problem", "of", "pretraining", "a", "two", "-", "stage", "open", "-", "domain", "question", "answering", "system"], "offsets": [3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 18]}, {"text": "with strong transfer capabilities", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "strong", "transfer", "capabilities"], "offsets": [24, 25, 26, 27]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "without task - specific annotations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "task", "-", "specific", "annotations"], "offsets": [49, 50, 51, 52, 53]}, {"text": "large amount of high - quality question - answer - context triplets", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["large", "amount", "of", "high", "-", "quality", "question", "-", "answer", "-", "context", "triplets"], "offsets": [37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [35]}}, {"event_type": "MDS", "arguments": [{"text": "align well", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["align", "well"], "offsets": [60, 61]}, {"text": "wide range of domains", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["wide", "range", "of", "domains"], "offsets": [72, 73, 74, 75]}], "trigger": {"text": "covering", "tokens": ["covering"], "offsets": [70]}}, {"event_type": "MDS", "arguments": [{"text": "question", "nugget_type": "FEA", "argument_type": 
"TriedComponent", "tokens": ["question"], "offsets": [89]}, {"text": "its semantically relevant context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["question", "semantically", "relevant", "context"], "offsets": [89, 92, 93, 94]}, {"text": "with supporting evidence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "supporting", "evidence"], "offsets": [95, 96, 97]}, {"text": "align well", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["align", "well"], "offsets": [60, 61]}], "trigger": {"text": "linking", "tokens": ["linking"], "offsets": [87]}}, {"event_type": "MDS", "arguments": [{"text": "correct answer", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["correct", "answer"], "offsets": [111, 112]}, {"text": "context", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["context"], "offsets": [115]}, {"text": "align well", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["align", "well"], "offsets": [60, 61]}], "trigger": {"text": "identifying", "tokens": ["identifying"], "offsets": [109]}}, {"event_type": "PUR", "arguments": [{"text": "with downstream tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "downstream", "tasks"], "offsets": [62, 63, 64]}], "trigger": {"text": "align well", "tokens": ["align", "well"], "offsets": [60, 61]}}, {"event_type": "RWF", "arguments": [{"text": "previous pretraining approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "pretraining", "approaches"], "offsets": [123, 124, 125]}, {"text": "short of one or more of these requirements", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["short", "of", "one", "or", "more", "of", "covering", "a", "wide", "range", "of", "domains", ",", "linking", "a", "question", "to", "question", "semantically", "relevant", "context", "with", "supporting", "evidence", "and", "identifying", "the", "correct", "answer", "in", 
"the", "context"], "offsets": [128, 129, 130, 131, 132, 133, 70, 71, 72, 73, 74, 75, 83, 87, 88, 89, 90, 89, 92, 93, 94, 95, 96, 97, 105, 109, 110, 111, 112, 113, 114, 115]}], "trigger": {"text": "fall", "tokens": ["fall"], "offsets": [127]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [141]}, {"text": "large - scale corpus", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "-", "scale", "corpus"], "offsets": [145, 146, 147, 148]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [143]}}, {"event_type": "WKS", "arguments": [{"text": "retriever", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["retriever"], "offsets": [172]}, {"text": "reader", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["reader"], "offsets": [175]}], "trigger": {"text": "benefit", "tokens": ["benefit"], "offsets": [169]}}, {"event_type": "FAC", "arguments": [{"text": "pretrained retriever", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["pretrained", "retriever"], "offsets": [179, 180]}, {"text": "2 % - 10 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", "%", "-", "10", "%"], "offsets": [183, 184, 185, 186, 187]}, {"text": "gains", "nugget_type": "STR", "argument_type": "Object", "tokens": ["gains"], "offsets": [189]}, {"text": "absolute", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["absolute"], "offsets": [188]}, {"text": "top - 20 accuracy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["top", "-", "20", "accuracy"], "offsets": [191, 192, 193, 194]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [181]}}, {"event_type": "CMP", "arguments": [{"text": "with our pretrained reader", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "our", "pretrained", "reader"], "offsets": [197, 198, 199, 200]}, {"text": "entire system", "nugget_type": "APP", 
"argument_type": "Arg1", "tokens": ["entire", "two", "-", "stage", "open", "-", "domain", "question", "answering", "system"], "offsets": [203, 7, 8, 9, 10, 11, 12, 13, 14, 18]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [205]}, {"text": "4 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["4", "%"], "offsets": [209, 210]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [205]}}], "document": ["we", "consider", "the", "problem", "of", "pretraining", "a", "two", "-", "stage", "open", "-", "domain", "question", "answering", "(", "qa", ")", "system", "(", "retriever", "+", "reader", ")", "with", "strong", "transfer", "capabilities", ".", "the", "key", "challenge", "is", "how", "to", "construct", "a", "large", "amount", "of", "high", "-", "quality", "question", "-", "answer", "-", "context", "triplets", "without", "task", "-", "specific", "annotations", ".", "specifically", ",", "the", "triplets", "should", "align", "well", "with", "downstream", "tasks", "by", ":", "(", "i", ")", "covering", "a", "wide", "range", "of", "domains", "(", "for", "open", "-", "domain", "applications", ")", ",", "(", "ii", ")", "linking", "a", "question", "to", "its", "semantically", "relevant", "context", "with", "supporting", "evidence", "(", "for", "training", "the", "retriever", ")", ",", "and", "(", "iii", ")", "identifying", "the", "correct", "answer", "in", "the", "context", "(", "for", "training", "the", "reader", ")", ".", "previous", "pretraining", "approaches", "generally", "fall", "short", "of", "one", "or", "more", "of", "these", "requirements", ".", "in", "this", "work", ",", "we", "automatically", "construct", "a", "large", "-", "scale", "corpus", "that", "meets", "all", "three", "criteria", "by", "consulting", "millions", "of", "references", "cited", "within", "wikipedia", ".", "the", "well", "-", "aligned", "pretraining", "signals", "benefit", "both", "the", "retriever", "and", 
"the", "reader", "significantly", ".", "our", "pretrained", "retriever", "leads", "to", "2", "%", "-", "10", "%", "absolute", "gains", "in", "top", "-", "20", "accuracy", ".", "and", "with", "our", "pretrained", "reader", ",", "the", "entire", "system", "improves", "by", "up", "to", "4", "%", "in", "exact", "match", "."]}, {"venue": "ACL", "title": "Unsupervised Parallel Sentence Extraction with Parallel Segment Detection Helps Machine Translation", "abstract": "Mining parallel sentences from comparable corpora is important. Most previous work relies on supervised systems, which are trained on parallel data, thus their applicability is problematic in low-resource scenarios. Recent developments in building unsupervised bilingual word embeddings made it possible to mine parallel sentences based on cosine similarities of source and target language words. We show that relying only on this information is not enough, since sentences often have similar words but different meanings. We detect continuous parallel segments in sentence pair candidates and rely on them when mining parallel sentences. We show better mining accuracy on three language pairs in a standard shared task on artificial data. We also provide the first experiments showing that parallel sentences mined from real life sources improve unsupervised MT. 
Our code is available, we hope it will be used to support low-resource MT research.", "doc_id": "75144dca96ec05b1fe5270f172f0f3a5", "publication_year": 2019, "sentences": ["mining parallel sentences from comparable corpora is important .", "most previous work relies on supervised systems , which are trained on parallel data , thus their applicability is problematic in low - resource scenarios .", "recent developments in building unsupervised bilingual word embeddings made it possible to mine parallel sentences based on cosine similarities of source and target language words .", "we show that relying only on this information is not enough , since sentences often have similar words but different meanings .", "we detect continuous parallel segments in sentence pair candidates and rely on them when mining parallel sentences .", "we show better mining accuracy on three language pairs in a standard shared task on artificial data .", "we also provide the first experiments showing that parallel sentences mined from real life sources improve unsupervised mt .", "our code is available , we hope it will be used to support low - resource mt research ."], "events": [{"event_type": "ITT", "arguments": [{"text": "mining parallel sentences", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["mining", "parallel", "sentences"], "offsets": [0, 1, 2]}, {"text": "comparable corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["comparable", "corpora"], "offsets": [4, 5]}], "trigger": {"text": "important", "tokens": ["important"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "most previous work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["most", "previous", "work"], "offsets": [9, 10, 11]}, {"text": "applicability", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["applicability"], "offsets": [26]}, {"text": "problematic", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["problematic"], "offsets": 
[28]}], "trigger": {"text": "problematic", "tokens": ["problematic"], "offsets": [28]}}, {"event_type": "RWS", "arguments": [{"text": "unsupervised bilingual word embeddings", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["unsupervised", "bilingual", "word", "embeddings"], "offsets": [39, 40, 41, 42]}, {"text": "parallel sentences", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["parallel", "sentences"], "offsets": [48, 49]}, {"text": "cosine similarities of source and target language words", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["cosine", "similarities", "of", "source", "and", "target", "language", "words"], "offsets": [52, 53, 54, 55, 56, 57, 58, 59]}], "trigger": {"text": "mine", "tokens": ["mine"], "offsets": [47]}}, {"event_type": "RWF", "arguments": [{"text": "not enough", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "enough"], "offsets": [70, 71]}, {"text": "only on this information", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["only", "on", "parallel", "sentences"], "offsets": [65, 66, 48, 49]}], "trigger": {"text": "relying", "tokens": ["relying"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "continuous parallel segments", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["continuous", "parallel", "segments"], "offsets": [85, 86, 87]}, {"text": "in sentence pair candidates", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "sentence", "pair", "candidates"], "offsets": [88, 89, 90, 91]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [84]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [83]}, {"text": "continuous parallel segments", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["continuous", 
"parallel", "segments"], "offsets": [85, 86, 87]}, {"text": "mining", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["mining"], "offsets": [97]}], "trigger": {"text": "rely", "tokens": ["rely"], "offsets": [93]}}, {"event_type": "PUR", "arguments": [{"text": "parallel sentences", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["parallel", "sentences"], "offsets": [98, 99]}], "trigger": {"text": "mining", "tokens": ["mining"], "offsets": [97]}}, {"event_type": "CMP", "arguments": [{"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [103]}, {"text": "mining accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["mining", "accuracy"], "offsets": [104, 105]}, {"text": "on three language pairs in a standard shared task", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "language", "pairs", "in", "a", "standard", "shared", "task"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114]}, {"text": "artificial data", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["artificial", "data"], "offsets": [116, 117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [102]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [119]}, {"text": "first experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["first", "experiments"], "offsets": [123, 124]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [121]}}, {"event_type": "FAC", "arguments": [{"text": "parallel sentences mined from real life sources", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["parallel", "sentences", "mined", "from", "real", "life", "sources"], "offsets": [127, 128, 129, 130, 131, 132, 133]}, {"text": "unsupervised mt", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["unsupervised", "mt"], "offsets": [135, 136]}], "trigger": 
{"text": "improve", "tokens": ["improve"], "offsets": [134]}}], "document": ["mining", "parallel", "sentences", "from", "comparable", "corpora", "is", "important", ".", "most", "previous", "work", "relies", "on", "supervised", "systems", ",", "which", "are", "trained", "on", "parallel", "data", ",", "thus", "their", "applicability", "is", "problematic", "in", "low", "-", "resource", "scenarios", ".", "recent", "developments", "in", "building", "unsupervised", "bilingual", "word", "embeddings", "made", "it", "possible", "to", "mine", "parallel", "sentences", "based", "on", "cosine", "similarities", "of", "source", "and", "target", "language", "words", ".", "we", "show", "that", "relying", "only", "on", "this", "information", "is", "not", "enough", ",", "since", "sentences", "often", "have", "similar", "words", "but", "different", "meanings", ".", "we", "detect", "continuous", "parallel", "segments", "in", "sentence", "pair", "candidates", "and", "rely", "on", "them", "when", "mining", "parallel", "sentences", ".", "we", "show", "better", "mining", "accuracy", "on", "three", "language", "pairs", "in", "a", "standard", "shared", "task", "on", "artificial", "data", ".", "we", "also", "provide", "the", "first", "experiments", "showing", "that", "parallel", "sentences", "mined", "from", "real", "life", "sources", "improve", "unsupervised", "mt", ".", "our", "code", "is", "available", ",", "we", "hope", "it", "will", "be", "used", "to", "support", "low", "-", "resource", "mt", "research", "."]}, {"venue": "ACL", "title": "Scaling up Open Tagging from Tens to Thousands: Comprehension Empowered Attribute Value Extraction from Product Title", "abstract": "Supplementing product information by extracting attribute values from title is a crucial task in e-Commerce domain. 
Previous studies treat each attribute only as an entity type and build one set of NER tags (e.g., BIO) for each of them, leading to a scalability issue which unfits to the large sized attribute system in real world e-Commerce. In this work, we propose a novel approach to support value extraction scaling up to thousands of attributes without losing performance: (1) We propose to regard attribute as a query and adopt only one global set of BIO tags for any attributes to reduce the burden of attribute tag or model explosion; (2) We explicitly model the semantic representations for attribute and title, and develop an attention mechanism to capture the interactive semantic relations in-between to enforce our framework to be attribute comprehensive. We conduct extensive experiments in real-life datasets. The results show that our model not only outperforms existing state-of-the-art NER tagging models, but also is robust and generates promising results for up to 8,906 attributes.", "doc_id": "976328f7bf9b0ddff36efe5e2c8f44d9", "publication_year": 2019, "sentences": ["supplementing product information by extracting attribute values from title is a crucial task in e - commerce domain .", "previous studies treat each attribute only as an entity type and build one set of ner tags ( e . g . 
, bio ) for each of them , leading to a scalability issue which unfits to the large sized attribute system in real world e - commerce .", "in this work , we propose a novel approach to support value extraction scaling up to thousands of attributes without losing performance : ( 1 ) we propose to regard attribute as a query and adopt only one global set of bio tags for any attributes to reduce the burden of attribute tag or model explosion ; ( 2 ) we explicitly model the semantic representations for attribute and title , and develop an attention mechanism to capture the interactive semantic relations in - between to enforce our framework to be attribute comprehensive .", "we conduct extensive experiments in real - life datasets .", "the results show that our model not only outperforms existing state - of - the - art ner tagging models , but also is robust and generates promising results for up to 8 , 906 attributes ."], "events": [{"event_type": "RWF", "arguments": [{"text": "previous studies", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "studies"], "offsets": [19, 20]}, {"text": "scalability issue", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["scalability", "issue"], "offsets": [52, 53]}], "trigger": {"text": "leading", "tokens": ["leading"], "offsets": [49]}}, {"event_type": "ITT", "arguments": [{"text": "e - commerce domain", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["e", "-", "commerce", "domain"], "offsets": [14, 15, 16, 17]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [11]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [73]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [77]}, {"text": "support", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["support"], "offsets": [79]}], "trigger": {"text": "propose", "tokens": 
["propose"], "offsets": [74]}}, {"event_type": "PUR", "arguments": [{"text": "value extraction scaling up to thousands of attributes", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["value", "extraction", "scaling", "up", "to", "thousands", "of", "attributes"], "offsets": [80, 81, 82, 83, 84, 85, 86, 87]}, {"text": "without losing performance", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "losing", "performance"], "offsets": [88, 89, 90]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [79]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [95]}, {"text": "attribute", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["attribute"], "offsets": [99]}, {"text": "as a query", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "a", "query"], "offsets": [100, 101, 102]}], "trigger": {"text": "regard", "tokens": ["regard"], "offsets": [98]}}, {"event_type": "PUR", "arguments": [{"text": "burden of attribute tag", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["burden", "of", "attribute", "tag"], "offsets": [118, 119, 120, 121]}, {"text": "burden of model explosion", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["burden", "of", "model", "explosion"], "offsets": [118, 119, 123, 124]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [116]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [129]}, {"text": "attention mechanism", "nugget_type": "APP", "argument_type": "Content", "tokens": ["attention", "mechanism"], "offsets": [143, 144]}, {"text": "capture", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["capture"], "offsets": [146]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "interactive semantic 
relations in - between", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interactive", "semantic", "relations", "in", "-", "between"], "offsets": [148, 149, 150, 151, 152, 153]}], "trigger": {"text": "capture", "tokens": ["capture"], "offsets": [146]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "extensive experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "experiments"], "offsets": [165, 166]}, {"text": "real - life datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["real", "-", "life", "datasets"], "offsets": [168, 169, 170, 171]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [164]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [181]}, {"text": "robust", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["robust"], "offsets": [197]}, {"text": "generates", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["generates"], "offsets": [199]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [175]}}, {"event_type": "CMP", "arguments": [{"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [181]}, {"text": "existing state - of - the - art ner tagging models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "state", "-", "of", "-", "the", "-", "art", "ner", "tagging", "models"], "offsets": [182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [181]}}, {"event_type": "FAC", "arguments": [{"text": "8 , 906 attributes", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["8", ",", "906", "attributes"], "offsets": [205, 206, 207, 208]}, {"text": "promising results", "nugget_type": 
"STR", "argument_type": "Object", "tokens": ["promising", "results"], "offsets": [200, 201]}, {"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [178]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [199]}}, {"event_type": "RWS", "arguments": [{"text": "one set of ner tags", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["one", "set", "of", "ner", "tags"], "offsets": [31, 32, 33, 34, 35]}, {"text": "each of them", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["each", "of", "attribute"], "offsets": [45, 46, 23]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [30]}}, {"event_type": "RWF", "arguments": [{"text": "unfits", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unfits"], "offsets": [55]}, {"text": "large sized attribute system", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["large", "sized", "attribute", "system"], "offsets": [58, 59, 60, 61]}, {"text": "in real world e - commerce", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "real", "world", "e", "-", "commerce"], "offsets": [62, 63, 64, 65, 66, 67]}], "trigger": {"text": "unfits", "tokens": ["unfits"], "offsets": [55]}}, {"event_type": "MDS", "arguments": [{"text": "one global set of bio tags", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["one", "global", "set", "of", "bio", "tags"], "offsets": [106, 107, 108, 109, 110, 111]}, {"text": "attributes", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["attributes"], "offsets": [114]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [104]}}, {"event_type": "MDS", "arguments": [{"text": "semantic representations", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["semantic", "representations"], "offsets": [133, 134]}, {"text": "attribute", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["attribute"], 
"offsets": [136]}, {"text": "title", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["title"], "offsets": [138]}, {"text": "enforce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enforce"], "offsets": [155]}], "trigger": {"text": "explicitly model", "tokens": ["explicitly", "model"], "offsets": [130, 131]}}, {"event_type": "PUR", "arguments": [{"text": "framework to be attribute comprehensive", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["framework", "to", "be", "attribute", "comprehensive"], "offsets": [157, 158, 159, 160, 161]}], "trigger": {"text": "enforce", "tokens": ["enforce"], "offsets": [155]}}, {"event_type": "FAC", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [178]}], "trigger": {"text": "robust", "tokens": ["robust"], "offsets": [197]}}], "document": ["supplementing", "product", "information", "by", "extracting", "attribute", "values", "from", "title", "is", "a", "crucial", "task", "in", "e", "-", "commerce", "domain", ".", "previous", "studies", "treat", "each", "attribute", "only", "as", "an", "entity", "type", "and", "build", "one", "set", "of", "ner", "tags", "(", "e", ".", "g", ".", ",", "bio", ")", "for", "each", "of", "them", ",", "leading", "to", "a", "scalability", "issue", "which", "unfits", "to", "the", "large", "sized", "attribute", "system", "in", "real", "world", "e", "-", "commerce", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "approach", "to", "support", "value", "extraction", "scaling", "up", "to", "thousands", "of", "attributes", "without", "losing", "performance", ":", "(", "1", ")", "we", "propose", "to", "regard", "attribute", "as", "a", "query", "and", "adopt", "only", "one", "global", "set", "of", "bio", "tags", "for", "any", "attributes", "to", "reduce", "the", "burden", "of", "attribute", "tag", "or", "model", "explosion", ";", "(", "2", ")", "we", "explicitly", "model", "the", "semantic", 
"representations", "for", "attribute", "and", "title", ",", "and", "develop", "an", "attention", "mechanism", "to", "capture", "the", "interactive", "semantic", "relations", "in", "-", "between", "to", "enforce", "our", "framework", "to", "be", "attribute", "comprehensive", ".", "we", "conduct", "extensive", "experiments", "in", "real", "-", "life", "datasets", ".", "the", "results", "show", "that", "our", "model", "not", "only", "outperforms", "existing", "state", "-", "of", "-", "the", "-", "art", "ner", "tagging", "models", ",", "but", "also", "is", "robust", "and", "generates", "promising", "results", "for", "up", "to", "8", ",", "906", "attributes", "."]}, {"venue": "ACL", "title": "Space Efficient Context Encoding for Non-Task-Oriented Dialogue Generation with Graph Attention Transformer", "abstract": "To improve the coherence and knowledge retrieval capabilities of non-task-oriented dialogue systems, recent Transformer-based models aim to integrate fixed background context. This often comes in the form of knowledge graphs, and the integration is done by creating pseudo utterances through paraphrasing knowledge triples, added into the accumulated dialogue context. However, the context length is fixed in these architectures, which restricts how much background or dialogue context can be kept. In this work, we propose a more concise encoding for background context structured in the form of knowledge graphs, by expressing the graph connections through restrictions on the attention weights. The results of our human evaluation show that this encoding reduces space requirements without negative effects on the precision of reproduction of knowledge and perceived consistency. 
Further, models trained with our proposed context encoding generate dialogues that are judged to be more comprehensive and interesting.", "doc_id": "2812792e07e358a78e9998f0e944553a", "publication_year": 2021, "sentences": ["to improve the coherence and knowledge retrieval capabilities of non - task - oriented dialogue systems , recent transformer - based models aim to integrate fixed background context .", "this often comes in the form of knowledge graphs , and the integration is done by creating pseudo utterances through paraphrasing knowledge triples , added into the accumulated dialogue context .", "however , the context length is fixed in these architectures , which restricts how much background or dialogue context can be kept .", "in this work , we propose a more concise encoding for background context structured in the form of knowledge graphs , by expressing the graph connections through restrictions on the attention weights .", "the results of our human evaluation show that this encoding reduces space requirements without negative effects on the precision of reproduction of knowledge and perceived consistency .", "further , models trained with our proposed context encoding generate dialogues that are judged to be more comprehensive and interesting ."], "events": [{"event_type": "ITT", "arguments": [{"text": "transformer - based models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["transformer", "-", "based", "models"], "offsets": [18, 19, 20, 21]}], "trigger": {"text": "aim", "tokens": ["aim"], "offsets": [22]}}, {"event_type": "MDS", "arguments": [{"text": "pseudo utterances", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["pseudo", "utterances"], "offsets": [46, 47]}, {"text": "paraphrasing knowledge triples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["paraphrasing", "knowledge", "triples"], "offsets": [49, 50, 51]}, {"text": "added", "nugget_type": "E-PUR", "argument_type": "Target", 
"tokens": ["added"], "offsets": [53]}], "trigger": {"text": "creating", "tokens": ["creating"], "offsets": [45]}}, {"event_type": "PUR", "arguments": [{"text": "accumulated dialogue context", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["accumulated", "dialogue", "context"], "offsets": [56, 57, 58]}], "trigger": {"text": "added", "tokens": ["added"], "offsets": [53]}}, {"event_type": "RWF", "arguments": [{"text": "context length", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["context", "length"], "offsets": [63, 64]}, {"text": "fixed", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["fixed"], "offsets": [66]}], "trigger": {"text": "fixed", "tokens": ["fixed"], "offsets": [66]}}, {"event_type": "RWF", "arguments": [{"text": "restricts", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["restricts"], "offsets": [72]}, {"text": "context length", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["context", "length"], "offsets": [63, 64]}], "trigger": {"text": "restricts", "tokens": ["restricts"], "offsets": [72]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [87]}, {"text": "concise encoding", "nugget_type": "APP", "argument_type": "Content", "tokens": ["concise", "encoding"], "offsets": [91, 92]}, {"text": "background context structured in the form of knowledge graphs", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["background", "context", "structured", "in", "the", "form", "of", "knowledge", "graphs"], "offsets": [94, 95, 96, 97, 98, 99, 100, 101, 102]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [88]}}, {"event_type": "MDS", "arguments": [{"text": "attention weights", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["attention", "weights"], "offsets": [113, 114]}, {"text": "graph connections", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph", 
"connections"], "offsets": [107, 108]}], "trigger": {"text": "restrictions", "tokens": ["restrictions"], "offsets": [110]}}, {"event_type": "CMP", "arguments": [{"text": "without negative effects on the precision of reproduction of knowledge and perceived consistency", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "negative", "effects", "on", "the", "precision", "of", "reproduction", "of", "knowledge", "and", "perceived", "consistency"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141]}, {"text": "more concise encoding for background context structured in the form of knowledge graphs", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["more", "concise", "encoding", "for", "background", "context", "structured", "in", "the", "form", "of", "knowledge", "graphs"], "offsets": [90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102]}, {"text": "reduces", "nugget_type": "STR", "argument_type": "Result", "tokens": ["reduces"], "offsets": [126]}], "trigger": {"text": "reduces", "tokens": ["reduces"], "offsets": [126]}}, {"event_type": "CMP", "arguments": [{"text": "models trained with our proposed context encoding generate dialogues", "nugget_type": "FEA", "argument_type": "Arg1", "tokens": ["models", "trained", "with", "more", "concise", "encoding", "for", "background", "context", "structured", "in", "the", "form", "of", "knowledge", "graphs", "generate", "dialogues"], "offsets": [145, 146, 147, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 152, 153]}], "trigger": {"text": "more comprehensive and interesting", "tokens": ["more", "comprehensive", "and", "interesting"], "offsets": [159, 160, 161, 162]}}], "document": ["to", "improve", "the", "coherence", "and", "knowledge", "retrieval", "capabilities", "of", "non", "-", "task", "-", "oriented", "dialogue", "systems", ",", "recent", "transformer", "-", "based", "models", "aim", "to", "integrate", "fixed", "background", "context", ".", "this", "often", 
"comes", "in", "the", "form", "of", "knowledge", "graphs", ",", "and", "the", "integration", "is", "done", "by", "creating", "pseudo", "utterances", "through", "paraphrasing", "knowledge", "triples", ",", "added", "into", "the", "accumulated", "dialogue", "context", ".", "however", ",", "the", "context", "length", "is", "fixed", "in", "these", "architectures", ",", "which", "restricts", "how", "much", "background", "or", "dialogue", "context", "can", "be", "kept", ".", "in", "this", "work", ",", "we", "propose", "a", "more", "concise", "encoding", "for", "background", "context", "structured", "in", "the", "form", "of", "knowledge", "graphs", ",", "by", "expressing", "the", "graph", "connections", "through", "restrictions", "on", "the", "attention", "weights", ".", "the", "results", "of", "our", "human", "evaluation", "show", "that", "this", "encoding", "reduces", "space", "requirements", "without", "negative", "effects", "on", "the", "precision", "of", "reproduction", "of", "knowledge", "and", "perceived", "consistency", ".", "further", ",", "models", "trained", "with", "our", "proposed", "context", "encoding", "generate", "dialogues", "that", "are", "judged", "to", "be", "more", "comprehensive", "and", "interesting", "."]}, {"venue": "ACL", "title": "Enhancing Topic-to-Essay Generation with External Commonsense Knowledge", "abstract": "Automatic topic-to-essay generation is a challenging task since it requires generating novel, diverse, and topic-consistent paragraph-level text with a set of topics as input. Previous work tends to perform essay generation based solely on the given topics while ignoring massive commonsense knowledge. However, this commonsense knowledge provides additional background information, which can help to generate essays that are more novel and diverse. Towards filling this gap, we propose to integrate commonsense from the external knowledge base into the generator through dynamic memory mechanism. 
Besides, the adversarial training based on a multi-label discriminator is employed to further improve topic-consistency. We also develop a series of automatic evaluation metrics to comprehensively assess the quality of the generated essay. Experiments show that with external commonsense knowledge and adversarial training, the generated essays are more novel, diverse, and topic-consistent than existing methods in terms of both automatic and human evaluation.", "doc_id": "ada7f8bb943147fcf10007cb392b1fc1", "publication_year": 2019, "sentences": ["automatic topic - to - essay generation is a challenging task since it requires generating novel , diverse , and topic - consistent paragraph - level text with a set of topics as input .", "previous work tends to perform essay generation based solely on the given topics while ignoring massive commonsense knowledge .", "however , this commonsense knowledge provides additional background information , which can help to generate essays that are more novel and diverse .", "towards filling this gap , we propose to integrate commonsense from the external knowledge base into the generator through dynamic memory mechanism .", "besides , the adversarial training based on a multi - label discriminator is employed to further improve topic - consistency .", "we also develop a series of automatic evaluation metrics to comprehensively assess the quality of the generated essay .", "experiments show that with external commonsense knowledge and adversarial training , the generated essays are more novel , diverse , and topic - consistent than existing methods in terms of both automatic and human evaluation ."], "events": [{"event_type": "ITT", "arguments": [{"text": "automatic topic - to - essay generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["automatic", "topic", "-", "to", "-", "essay", "generation"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "task", "tokens": ["task"], "offsets": [10]}}, 
{"event_type": "RWF", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "work"], "offsets": [35, 36]}, {"text": "ignoring", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ignoring"], "offsets": [49]}], "trigger": {"text": "ignoring", "tokens": ["ignoring"], "offsets": [49]}}, {"event_type": "MDS", "arguments": [{"text": "generator", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["generator"], "offsets": [94]}, {"text": "through dynamic memory mechanism", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "dynamic", "memory", "mechanism"], "offsets": [95, 96, 97, 98]}, {"text": "filling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["filling"], "offsets": [78]}, {"text": "commonsense from the external knowledge base", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["commonsense", "from", "the", "external", "knowledge", "base"], "offsets": [86, 87, 88, 89, 90, 91]}], "trigger": {"text": "integrate", "tokens": ["integrate"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "gap", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["gap"], "offsets": [80]}], "trigger": {"text": "filling", "tokens": ["filling"], "offsets": [78]}}, {"event_type": "PUR", "arguments": [{"text": "quality of the generated essay", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["quality", "of", "the", "generated", "essay"], "offsets": [134, 135, 136, 137, 138]}], "trigger": {"text": "comprehensively assess", "tokens": ["comprehensively", "assess"], "offsets": [131, 132]}}, {"event_type": "FIN", "arguments": [{"text": "more novel , diverse , and topic - consistent", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["more", "novel", ",", "diverse", ",", "and", "topic", "-", "consistent"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}], "trigger": {"text": "show", "tokens": ["show"], 
"offsets": [141]}}, {"event_type": "CMP", "arguments": [{"text": "with external commonsense knowledge and adversarial training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "external", "commonsense", "knowledge", "and", "adversarial", "training"], "offsets": [143, 144, 145, 146, 147, 148, 149]}, {"text": "in terms of both automatic and human evaluation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "both", "automatic", "and", "human", "evaluation"], "offsets": [167, 168, 169, 170, 171, 172, 173, 174]}, {"text": "existing methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "methods"], "offsets": [165, 166]}, {"text": "generated essays", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["generated", "essays"], "offsets": [152, 153]}], "trigger": {"text": "more novel , diverse , and topic - consistent", "tokens": ["more", "novel", ",", "diverse", ",", "and", "topic", "-", "consistent"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}}, {"event_type": "WKS", "arguments": [{"text": "adversarial training based on a multi - label discriminator", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["adversarial", "training", "based", "on", "a", "multi", "-", "label", "discriminator"], "offsets": [103, 104, 105, 106, 107, 108, 109, 110, 111]}, {"text": "further improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["further", "improve"], "offsets": [115, 116]}], "trigger": {"text": "employed", "tokens": ["employed"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "topic - consistency", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["topic", "-", "consistency"], "offsets": [117, 118, 119]}], "trigger": {"text": "further improve", "tokens": ["further", "improve"], "offsets": [115, 116]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], 
"offsets": [121]}, {"text": "automatic evaluation metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["automatic", "evaluation", "metrics"], "offsets": [127, 128, 129]}, {"text": "comprehensively assess", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["comprehensively", "assess"], "offsets": [131, 132]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [123]}}], "document": ["automatic", "topic", "-", "to", "-", "essay", "generation", "is", "a", "challenging", "task", "since", "it", "requires", "generating", "novel", ",", "diverse", ",", "and", "topic", "-", "consistent", "paragraph", "-", "level", "text", "with", "a", "set", "of", "topics", "as", "input", ".", "previous", "work", "tends", "to", "perform", "essay", "generation", "based", "solely", "on", "the", "given", "topics", "while", "ignoring", "massive", "commonsense", "knowledge", ".", "however", ",", "this", "commonsense", "knowledge", "provides", "additional", "background", "information", ",", "which", "can", "help", "to", "generate", "essays", "that", "are", "more", "novel", "and", "diverse", ".", "towards", "filling", "this", "gap", ",", "we", "propose", "to", "integrate", "commonsense", "from", "the", "external", "knowledge", "base", "into", "the", "generator", "through", "dynamic", "memory", "mechanism", ".", "besides", ",", "the", "adversarial", "training", "based", "on", "a", "multi", "-", "label", "discriminator", "is", "employed", "to", "further", "improve", "topic", "-", "consistency", ".", "we", "also", "develop", "a", "series", "of", "automatic", "evaluation", "metrics", "to", "comprehensively", "assess", "the", "quality", "of", "the", "generated", "essay", ".", "experiments", "show", "that", "with", "external", "commonsense", "knowledge", "and", "adversarial", "training", ",", "the", "generated", "essays", "are", "more", "novel", ",", "diverse", ",", "and", "topic", "-", "consistent", "than", "existing", "methods", "in", "terms", "of", "both", 
"automatic", "and", "human", "evaluation", "."]}, {"venue": "ACL", "title": "A Unified MRC Framework for Named Entity Recognition", "abstract": "The task of named entity recognition (NER) is normally divided into nested NER and flat NER depending on whether named entities are nested or not.Models are usually separately developed for the two tasks, since sequence labeling models, the most widely used backbone for flat NER, are only able to assign a single label to a particular token, which is unsuitable for nested NER where a token may be assigned several labels. In this paper, we propose a unified framework that is capable of handling both flat and nested NER tasks. Instead of treating the task of NER as a sequence labeling problem, we propose to formulate it as a machine reading comprehension (MRC) task. For example, extracting entities with the per label is formalized as extracting answer spans to the question \u201cwhich person is mentioned in the text\".This formulation naturally tackles the entity overlapping issue in nested NER: the extraction of two overlapping entities with different categories requires answering two independent questions. Additionally, since the query encodes informative prior knowledge, this strategy facilitates the process of entity extraction, leading to better performances for not only nested NER, but flat NER. We conduct experiments on both nested and flat NER datasets.Experiment results demonstrate the effectiveness of the proposed formulation. 
We are able to achieve a vast amount of performance boost over current SOTA models on nested NER datasets, i.e., +1.28, +2.55, +5.44, +6.37,respectively on ACE04, ACE05, GENIA and KBP17, along with SOTA results on flat NER datasets, i.e., +0.24, +1.95, +0.21, +1.49 respectively on English CoNLL 2003, English OntoNotes 5.0, Chinese MSRA and Chinese OntoNotes 4.0.", "doc_id": "60fc259ec5fb992a8fac46ee65b161b7", "publication_year": 2020, "sentences": ["the task of named entity recognition ( ner ) is normally divided into nested ner and flat ner depending on whether named entities are nested or not .", "models are usually separately developed for the two tasks , since sequence labeling models , the most widely used backbone for flat ner , are only able to assign a single label to a particular token , which is unsuitable for nested ner where a token may be assigned several labels .", "in this paper , we propose a unified framework that is capable of handling both flat and nested ner tasks .", "instead of treating the task of ner as a sequence labeling problem , we propose to formulate it as a machine reading comprehension ( mrc ) task .", "for example , extracting entities with the per label is formalized as extracting answer spans to the question \u201c which person is mentioned in the text \" . this formulation naturally tackles the entity overlapping issue in nested ner : the extraction of two overlapping entities with different categories requires answering two independent questions .", "additionally , since the query encodes informative prior knowledge , this strategy facilitates the process of entity extraction , leading to better performances for not only nested ner , but flat ner .", "we conduct experiments on both nested and flat ner datasets .", "experiment results demonstrate the effectiveness of the proposed formulation .", "we are able to achieve a vast amount of performance boost over current sota models on nested ner datasets , i . e . , + 1 . 28 , + 2 . 
55 , + 5 . 44 , + 6 . 37 , respectively on ace04 , ace05 , genia and kbp17 , along with sota results on flat ner datasets , i . e . , + 0 . 24 , + 1 . 95 , + 0 . 21 , + 1 . 49 respectively on english conll 2003 , english ontonotes 5 . 0 , chinese msra and chinese ontonotes 4 . 0 ."], "events": [{"event_type": "ITT", "arguments": [{"text": "the task of named entity recognition", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["the", "task", "of", "named", "entity", "recognition"], "offsets": [0, 1, 2, 3, 4, 5]}], "trigger": {"text": "is normally divided", "tokens": ["is", "normally", "divided"], "offsets": [9, 10, 11]}}, {"event_type": "RWF", "arguments": [{"text": "unsuitable", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unsuitable"], "offsets": [67]}, {"text": "nested ner", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nested", "ner"], "offsets": [69, 70]}], "trigger": {"text": "unsuitable", "tokens": ["unsuitable"], "offsets": [67]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [84]}, {"text": "unified framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "handling", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["handling"], "offsets": [93]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [85]}}, {"event_type": "PUR", "arguments": [{"text": "flat ner tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["flat", "ner", "tasks"], "offsets": [95, 98, 99]}, {"text": "nested ner tasks", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nested", "ner", "tasks"], "offsets": [97, 98, 99]}], "trigger": {"text": "handling", "tokens": ["handling"], "offsets": [93]}}, {"event_type": "WKS", "arguments": [{"text": "machine reading comprehension", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["machine", 
"reading", "comprehension"], "offsets": [121, 122, 123]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [117]}}, {"event_type": "FAC", "arguments": [{"text": "extraction of two overlapping entities with different categories", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["extraction", "of", "two", "overlapping", "entities", "with", "different", "categories"], "offsets": [170, 171, 172, 173, 174, 175, 176, 177]}, {"text": "the process of entity extraction", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["the", "process", "of", "entity", "extraction"], "offsets": [197, 198, 199, 200, 201]}, {"text": "performances", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["performances"], "offsets": [206]}, {"text": "for not only nested ner , but flat ner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "not", "only", "nested", "ner", ",", "but", "flat", "ner"], "offsets": [207, 208, 209, 210, 211, 212, 213, 214, 215]}], "trigger": {"text": "facilitates", "tokens": ["facilitates"], "offsets": [196]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [217]}, {"text": "experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["experiments"], "offsets": [219]}, {"text": "nested ner datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["nested", "ner", "datasets"], "offsets": [222, 225, 226]}, {"text": "flat ner datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["flat", "ner", "datasets"], "offsets": [224, 225, 226]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [218]}}, {"event_type": "FAC", "arguments": [{"text": "experiment results", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["experiment", "results"], "offsets": [228, 229]}, {"text": "the effectiveness of the proposed formulation", "nugget_type": "TAK", 
"argument_type": "Object", "tokens": ["the", "effectiveness", "of", "the", "proposed", "formulation"], "offsets": [231, 232, 233, 234, 235, 236]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [230]}}, {"event_type": "CMP", "arguments": [{"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [247]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "on nested ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "nested", "ner", "datasets"], "offsets": [253, 254, 255, 256]}, {"text": "1 . 28", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "28"], "offsets": [264, 265, 266]}, {"text": "ace04", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["ace04"], "offsets": [285]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [247]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "on nested ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "nested", "ner", "datasets"], "offsets": [253, 254, 255, 256]}, {"text": "2 . 
55", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", ".", "55"], "offsets": [269, 270, 271]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [247]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "on nested ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "nested", "ner", "datasets"], "offsets": [253, 254, 255, 256]}, {"text": "genia", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["genia"], "offsets": [289]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [247]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "on nested ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "nested", "ner", "datasets"], "offsets": [253, 254, 255, 256]}, {"text": "6 . 
37", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["6", ".", "37"], "offsets": [279, 280, 281]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "sota results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sota", "results"], "offsets": [295, 296]}, {"text": "on flat ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "flat", "ner", "datasets"], "offsets": [297, 298, 299, 300]}, {"text": "0 . 24", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["0", ".", "24"], "offsets": [308, 309, 310]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "sota results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sota", "results"], "offsets": [295, 296]}, {"text": "on flat ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "flat", "ner", "datasets"], "offsets": [297, 298, 299, 300]}, {"text": "1 . 
95", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "95"], "offsets": [313, 314, 315]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "sota results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sota", "results"], "offsets": [295, 296]}, {"text": "on flat ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "flat", "ner", "datasets"], "offsets": [297, 298, 299, 300]}, {"text": "0 . 21", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["0", ".", "21"], "offsets": [318, 319, 320]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}, {"event_type": "CMP", "arguments": [{"text": "unified framework", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unified", "framework"], "offsets": [87, 88]}, {"text": "boost", "nugget_type": "STR", "argument_type": "Result", "tokens": ["boost"], "offsets": [248]}, {"text": "current sota models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "sota", "models"], "offsets": [250, 251, 252]}, {"text": "sota results", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sota", "results"], "offsets": [295, 296]}, {"text": "on flat ner datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "flat", "ner", "datasets"], "offsets": [297, 298, 299, 300]}, {"text": "1 . 
49", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "49"], "offsets": [323, 324, 325]}], "trigger": {"text": "boost", "tokens": ["boost"], "offsets": [248]}}], "document": ["the", "task", "of", "named", "entity", "recognition", "(", "ner", ")", "is", "normally", "divided", "into", "nested", "ner", "and", "flat", "ner", "depending", "on", "whether", "named", "entities", "are", "nested", "or", "not", ".", "models", "are", "usually", "separately", "developed", "for", "the", "two", "tasks", ",", "since", "sequence", "labeling", "models", ",", "the", "most", "widely", "used", "backbone", "for", "flat", "ner", ",", "are", "only", "able", "to", "assign", "a", "single", "label", "to", "a", "particular", "token", ",", "which", "is", "unsuitable", "for", "nested", "ner", "where", "a", "token", "may", "be", "assigned", "several", "labels", ".", "in", "this", "paper", ",", "we", "propose", "a", "unified", "framework", "that", "is", "capable", "of", "handling", "both", "flat", "and", "nested", "ner", "tasks", ".", "instead", "of", "treating", "the", "task", "of", "ner", "as", "a", "sequence", "labeling", "problem", ",", "we", "propose", "to", "formulate", "it", "as", "a", "machine", "reading", "comprehension", "(", "mrc", ")", "task", ".", "for", "example", ",", "extracting", "entities", "with", "the", "per", "label", "is", "formalized", "as", "extracting", "answer", "spans", "to", "the", "question", "\u201c", "which", "person", "is", "mentioned", "in", "the", "text", "\"", ".", "this", "formulation", "naturally", "tackles", "the", "entity", "overlapping", "issue", "in", "nested", "ner", ":", "the", "extraction", "of", "two", "overlapping", "entities", "with", "different", "categories", "requires", "answering", "two", "independent", "questions", ".", "additionally", ",", "since", "the", "query", "encodes", "informative", "prior", "knowledge", ",", "this", "strategy", "facilitates", "the", "process", "of", "entity", "extraction", ",", "leading", "to", 
"better", "performances", "for", "not", "only", "nested", "ner", ",", "but", "flat", "ner", ".", "we", "conduct", "experiments", "on", "both", "nested", "and", "flat", "ner", "datasets", ".", "experiment", "results", "demonstrate", "the", "effectiveness", "of", "the", "proposed", "formulation", ".", "we", "are", "able", "to", "achieve", "a", "vast", "amount", "of", "performance", "boost", "over", "current", "sota", "models", "on", "nested", "ner", "datasets", ",", "i", ".", "e", ".", ",", "+", "1", ".", "28", ",", "+", "2", ".", "55", ",", "+", "5", ".", "44", ",", "+", "6", ".", "37", ",", "respectively", "on", "ace04", ",", "ace05", ",", "genia", "and", "kbp17", ",", "along", "with", "sota", "results", "on", "flat", "ner", "datasets", ",", "i", ".", "e", ".", ",", "+", "0", ".", "24", ",", "+", "1", ".", "95", ",", "+", "0", ".", "21", ",", "+", "1", ".", "49", "respectively", "on", "english", "conll", "2003", ",", "english", "ontonotes", "5", ".", "0", ",", "chinese", "msra", "and", "chinese", "ontonotes", "4", ".", "0", "."]}, {"venue": "ACL", "title": "Embedding Time Differences in Context-sensitive Neural Networks for Learning Time to Event", "abstract": "We propose an effective context-sensitive neural model for time to event (TTE) prediction task, which aims to predict the amount of time to/from the occurrence of given events in streaming content. We investigate this problem in the context of a multi-task learning framework, which we enrich with time difference embeddings. In addition, we develop a multi-genre dataset of English events about soccer competitions and academy awards ceremonies, and their relevant tweets obtained from Twitter. Our model is 1.4 and 3.3 hours more accurate than the current state-of-the-art model in estimating TTE on English and Dutch tweets respectively. 
We examine different aspects of our model to illustrate its source of improvement.", "doc_id": "c2dfde7bcf5c49b5ab6e3c3aca76c2e6", "publication_year": 2021, "sentences": ["we propose an effective context - sensitive neural model for time to event ( tte ) prediction task , which aims to predict the amount of time to / from the occurrence of given events in streaming content .", "we investigate this problem in the context of a multi - task learning framework , which we enrich with time difference embeddings .", "in addition , we develop a multi - genre dataset of english events about soccer competitions and academy awards ceremonies , and their relevant tweets obtained from twitter .", "our model is 1 . 4 and 3 . 3 hours more accurate than the current state - of - the - art model in estimating tte on english and dutch tweets respectively .", "we examine different aspects of our model to illustrate its source of improvement ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "effective context - sensitive neural model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["effective", "context", "-", "sensitive", "neural", "model"], "offsets": [3, 4, 5, 6, 7, 8]}, {"text": "time to event ( tte ) prediction task", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["time", "to", "event", "prediction", "task"], "offsets": [10, 11, 12, 16, 17]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [22]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "PUR", "arguments": [{"text": "amount of time", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["amount", "of", "time"], "offsets": [24, 25, 26]}, {"text": "to the occurrence of given events in streaming content", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "the", "occurrence", 
"of", "given", "events", "in", "streaming", "content"], "offsets": [27, 30, 31, 32, 33, 34, 35, 36, 37]}, {"text": "from the occurrence of given events in streaming content", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "occurrence", "of", "given", "events", "in", "streaming", "content"], "offsets": [29, 30, 31, 32, 33, 34, 35, 36, 37]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [39]}, {"text": "this problem", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["this", "problem"], "offsets": [41, 42]}, {"text": "in the context of a multi - task learning framework", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "context", "of", "a", "multi", "-", "task", "learning", "framework"], "offsets": [43, 44, 45, 46, 47, 48, 49, 50, 51, 52]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [40]}}, {"event_type": "MDS", "arguments": [{"text": "multi - task learning framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["multi", "-", "task", "learning", "framework"], "offsets": [48, 49, 50, 51, 52]}, {"text": "time difference embeddings", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["time", "difference", "embeddings"], "offsets": [58, 59, 60]}], "trigger": {"text": "enrich", "tokens": ["enrich"], "offsets": [56]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "multi - genre dataset of english events", "nugget_type": "DST", "argument_type": "Content", "tokens": ["multi", "-", "genre", "dataset", "of", "english", "events"], "offsets": [68, 69, 70, 71, 72, 73, 74]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [66]}}, {"event_type": "CMP", "arguments": 
[{"text": "current state - of - the - art model", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "model"], "offsets": [106, 107, 108, 109, 110, 111, 112, 113, 114]}, {"text": "estimating tte", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["estimating", "tte"], "offsets": [116, 117]}, {"text": "effective context - sensitive neural model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["effective", "context", "-", "sensitive", "neural", "model"], "offsets": [3, 4, 5, 6, 7, 8]}, {"text": "1 . 4", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "4"], "offsets": [94, 95, 96]}, {"text": "hours", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["hours"], "offsets": [101]}, {"text": "more", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more"], "offsets": [102]}, {"text": "3 . 3", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["3", ".", "3"], "offsets": [98, 99, 100]}, {"text": "on english", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "english"], "offsets": [118, 119]}, {"text": "on dutch tweets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "dutch", "tweets"], "offsets": [118, 121, 122]}], "trigger": {"text": "accurate", "tokens": ["accurate"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [125]}, {"text": "illustrate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["illustrate"], "offsets": [133]}, {"text": "different aspects of our model", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["different", "aspects", "of", "our", "model"], "offsets": [127, 128, 129, 130, 131]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [126]}}, {"event_type": "PUR", "arguments": [{"text": "source of improvement", "nugget_type": "TAK", 
"argument_type": "Aim", "tokens": ["source", "of", "improvement"], "offsets": [135, 136, 137]}], "trigger": {"text": "illustrate", "tokens": ["illustrate"], "offsets": [133]}}], "document": ["we", "propose", "an", "effective", "context", "-", "sensitive", "neural", "model", "for", "time", "to", "event", "(", "tte", ")", "prediction", "task", ",", "which", "aims", "to", "predict", "the", "amount", "of", "time", "to", "/", "from", "the", "occurrence", "of", "given", "events", "in", "streaming", "content", ".", "we", "investigate", "this", "problem", "in", "the", "context", "of", "a", "multi", "-", "task", "learning", "framework", ",", "which", "we", "enrich", "with", "time", "difference", "embeddings", ".", "in", "addition", ",", "we", "develop", "a", "multi", "-", "genre", "dataset", "of", "english", "events", "about", "soccer", "competitions", "and", "academy", "awards", "ceremonies", ",", "and", "their", "relevant", "tweets", "obtained", "from", "twitter", ".", "our", "model", "is", "1", ".", "4", "and", "3", ".", "3", "hours", "more", "accurate", "than", "the", "current", "state", "-", "of", "-", "the", "-", "art", "model", "in", "estimating", "tte", "on", "english", "and", "dutch", "tweets", "respectively", ".", "we", "examine", "different", "aspects", "of", "our", "model", "to", "illustrate", "its", "source", "of", "improvement", "."]}, {"venue": "ACL", "title": "Neural Graph Matching Networks for Chinese Short Text Matching", "abstract": "Chinese short text matching usually employs word sequences rather than character sequences to get better performance. However, Chinese word segmentation can be erroneous, ambiguous or inconsistent, which consequently hurts the final matching performance. To address this problem, we propose neural graph matching networks, a novel sentence matching framework capable of dealing with multi-granular input information. 
Instead of a character sequence or a single word sequence, paired word lattices formed from multiple word segmentation hypotheses are used as input and the model learns a graph representation according to an attentive graph matching mechanism. Experiments on two Chinese datasets show that our models outperform the state-of-the-art short text matching models.", "doc_id": "425f8205df4322fce1c0cb78db5386fc", "publication_year": 2020, "sentences": ["chinese short text matching usually employs word sequences rather than character sequences to get better performance .", "however , chinese word segmentation can be erroneous , ambiguous or inconsistent , which consequently hurts the final matching performance .", "to address this problem , we propose neural graph matching networks , a novel sentence matching framework capable of dealing with multi - granular input information .", "instead of a character sequence or a single word sequence , paired word lattices formed from multiple word segmentation hypotheses are used as input and the model learns a graph representation according to an attentive graph matching mechanism .", "experiments on two chinese datasets show that our models outperform the state - of - the - art short text matching models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "chinese short text matching", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["chinese", "short", "text", "matching"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "erroneous , ambiguous or inconsistent", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["erroneous", ",", "ambiguous", "or", "inconsistent"], "offsets": [24, 25, 26, 27, 28]}, {"text": "final matching performance", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["final", "matching", "performance"], "offsets": [34, 35, 36]}], "trigger": {"text": "consequently hurts", "tokens": 
["consequently", "hurts"], "offsets": [31, 32]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [43]}, {"text": "neural graph matching networks", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "graph", "matching", "networks"], "offsets": [45, 46, 47, 48]}, {"text": "dealing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["dealing"], "offsets": [57]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [44]}}, {"event_type": "PUR", "arguments": [{"text": "multi - granular input information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["multi", "-", "granular", "input", "information"], "offsets": [59, 60, 61, 62, 63]}], "trigger": {"text": "dealing", "tokens": ["dealing"], "offsets": [57]}}, {"event_type": "MDS", "arguments": [{"text": "paired word lattices", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["paired", "word", "lattices"], "offsets": [76, 77, 78]}, {"text": "input", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["input"], "offsets": [88]}, {"text": "formed from multiple word segmentation hypotheses", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["formed", "from", "multiple", "word", "segmentation", "hypotheses"], "offsets": [79, 80, 81, 82, 83, 84]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [86]}}, {"event_type": "MDS", "arguments": [{"text": "graph representation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph", "representation"], "offsets": [94, 95]}, {"text": "attentive graph matching mechanism", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["attentive", "graph", "matching", "mechanism"], "offsets": [99, 100, 101, 102]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [92]}}, {"event_type": "CMP", "arguments": [{"text": "two chinese datasets", "nugget_type": "DST", 
"argument_type": "Dataset", "tokens": ["two", "chinese", "datasets"], "offsets": [106, 107, 108]}, {"text": "state - of - the - art short text matching models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "short", "text", "matching", "models"], "offsets": [115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125]}, {"text": "neural graph matching networks", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "graph", "matching", "networks"], "offsets": [45, 46, 47, 48]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [113]}}], "document": ["chinese", "short", "text", "matching", "usually", "employs", "word", "sequences", "rather", "than", "character", "sequences", "to", "get", "better", "performance", ".", "however", ",", "chinese", "word", "segmentation", "can", "be", "erroneous", ",", "ambiguous", "or", "inconsistent", ",", "which", "consequently", "hurts", "the", "final", "matching", "performance", ".", "to", "address", "this", "problem", ",", "we", "propose", "neural", "graph", "matching", "networks", ",", "a", "novel", "sentence", "matching", "framework", "capable", "of", "dealing", "with", "multi", "-", "granular", "input", "information", ".", "instead", "of", "a", "character", "sequence", "or", "a", "single", "word", "sequence", ",", "paired", "word", "lattices", "formed", "from", "multiple", "word", "segmentation", "hypotheses", "are", "used", "as", "input", "and", "the", "model", "learns", "a", "graph", "representation", "according", "to", "an", "attentive", "graph", "matching", "mechanism", ".", "experiments", "on", "two", "chinese", "datasets", "show", "that", "our", "models", "outperform", "the", "state", "-", "of", "-", "the", "-", "art", "short", "text", "matching", "models", "."]}, {"venue": "ACL", "title": "Neural Mixed Counting Models for Dispersed Topic Discovery", "abstract": "Mixed counting models that use the negative binomial distribution as the 
prior can well model over-dispersed and hierarchically dependent random variables; thus they have attracted much attention in mining dispersed document topics. However, the existing parameter inference method like Monte Carlo sampling is quite time-consuming. In this paper, we propose two efficient neural mixed counting models, i.e., the Negative Binomial-Neural Topic Model (NB-NTM) and the Gamma Negative Binomial-Neural Topic Model (GNB-NTM) for dispersed topic discovery. Neural variational inference algorithms are developed to infer model parameters by using the reparameterization of Gamma distribution and the Gaussian approximation of Poisson distribution. Experiments on real-world datasets indicate that our models outperform state-of-the-art baseline models in terms of perplexity and topic coherence. The results also validate that both NB-NTM and GNB-NTM can produce explainable intermediate variables by generating dispersed proportions of document topics.", "doc_id": "49a310cac98701c0cc0997b0d7c37eba", "publication_year": 2020, "sentences": ["mixed counting models that use the negative binomial distribution as the prior can well model over - dispersed and hierarchically dependent random variables ; thus they have attracted much attention in mining dispersed document topics .", "however , the existing parameter inference method like monte carlo sampling is quite time - consuming .", "in this paper , we propose two efficient neural mixed counting models , i . e . 
, the negative binomial - neural topic model ( nb - ntm ) and the gamma negative binomial - neural topic model ( gnb - ntm ) for dispersed topic discovery .", "neural variational inference algorithms are developed to infer model parameters by using the reparameterization of gamma distribution and the gaussian approximation of poisson distribution .", "experiments on real - world datasets indicate that our models outperform state - of - the - art baseline models in terms of perplexity and topic coherence .", "the results also validate that both nb - ntm and gnb - ntm can produce explainable intermediate variables by generating dispersed proportions of document topics ."], "events": [{"event_type": "RWF", "arguments": [{"text": "time - consuming", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["time", "-", "consuming"], "offsets": [49, 50, 51]}], "trigger": {"text": "time - consuming", "tokens": ["time", "-", "consuming"], "offsets": [49, 50, 51]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "two efficient neural mixed counting models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "efficient", "neural", "mixed", "counting", "models"], "offsets": [59, 60, 61, 62, 63, 64]}, {"text": "dispersed topic discovery", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dispersed", "topic", "discovery"], "offsets": [98, 99, 100]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "neural variational inference algorithms", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "variational", "inference", "algorithms"], "offsets": [102, 103, 104, 105]}, {"text": "infer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["infer"], "offsets": [109]}, {"text": "using the reparameterization of gamma distribution and the gaussian approximation 
of poisson distribution", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "the", "reparameterization", "of", "gamma", "distribution", "and", "the", "gaussian", "approximation", "of", "poisson", "distribution"], "offsets": [113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125]}], "trigger": {"text": "developed", "tokens": ["developed"], "offsets": [107]}}, {"event_type": "PUR", "arguments": [{"text": "model parameters", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["model", "parameters"], "offsets": [110, 111]}], "trigger": {"text": "infer", "tokens": ["infer"], "offsets": [109]}}, {"event_type": "CMP", "arguments": [{"text": "real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["real", "-", "world", "datasets"], "offsets": [129, 130, 131, 132]}, {"text": "outperform", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperform"], "offsets": [137]}, {"text": "models", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["models"], "offsets": [136]}, {"text": "state - of - the - art baseline models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "baseline", "models"], "offsets": [138, 139, 140, 141, 142, 143, 144, 145, 146]}, {"text": "perplexity", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["perplexity"], "offsets": [150]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "gamma negative binomial - neural topic model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["gamma", "negative", "binomial", "-", "neural", "topic", "model"], "offsets": [85, 86, 87, 88, 89, 90, 91]}, {"text": "negative binomial - neural topic model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["negative", "binomial", "-", "neural", "topic", "model"], "offsets": [72, 73, 74, 75, 76, 77]}, {"text": "intermediate variables", 
"nugget_type": "MOD", "argument_type": "Object", "tokens": ["intermediate", "variables"], "offsets": [171, 172]}, {"text": "generating dispersed proportions of document topics", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["generating", "dispersed", "proportions", "of", "document", "topics"], "offsets": [174, 175, 176, 177, 178, 179]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [169]}}, {"event_type": "ITT", "arguments": [{"text": "mixed counting models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["mixed", "counting", "models"], "offsets": [0, 1, 2]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [4]}}], "document": ["mixed", "counting", "models", "that", "use", "the", "negative", "binomial", "distribution", "as", "the", "prior", "can", "well", "model", "over", "-", "dispersed", "and", "hierarchically", "dependent", "random", "variables", ";", "thus", "they", "have", "attracted", "much", "attention", "in", "mining", "dispersed", "document", "topics", ".", "however", ",", "the", "existing", "parameter", "inference", "method", "like", "monte", "carlo", "sampling", "is", "quite", "time", "-", "consuming", ".", "in", "this", "paper", ",", "we", "propose", "two", "efficient", "neural", "mixed", "counting", "models", ",", "i", ".", "e", ".", ",", "the", "negative", "binomial", "-", "neural", "topic", "model", "(", "nb", "-", "ntm", ")", "and", "the", "gamma", "negative", "binomial", "-", "neural", "topic", "model", "(", "gnb", "-", "ntm", ")", "for", "dispersed", "topic", "discovery", ".", "neural", "variational", "inference", "algorithms", "are", "developed", "to", "infer", "model", "parameters", "by", "using", "the", "reparameterization", "of", "gamma", "distribution", "and", "the", "gaussian", "approximation", "of", "poisson", "distribution", ".", "experiments", "on", "real", "-", "world", "datasets", "indicate", "that", "our", "models", "outperform", "state", "-", "of", "-", "the", "-", "art", 
"baseline", "models", "in", "terms", "of", "perplexity", "and", "topic", "coherence", ".", "the", "results", "also", "validate", "that", "both", "nb", "-", "ntm", "and", "gnb", "-", "ntm", "can", "produce", "explainable", "intermediate", "variables", "by", "generating", "dispersed", "proportions", "of", "document", "topics", "."]}, {"venue": "ACL", "title": "Transferable Dialogue Systems and User Simulators", "abstract": "One of the difficulties in training dialogue systems is the lack of training data. We explore the possibility of creating dialogue data through the interaction between a dialogue system and a user simulator. Our goal is to develop a modelling framework that can incorporate new dialogue scenarios through self-play between the two agents. In this framework, we first pre-train the two agents on a collection of source domain dialogues, which equips the agents to converse with each other via natural language. With further fine-tuning on a small amount of target domain data, the agents continue to interact with the aim of improving their behaviors using reinforcement learning with structured reward functions. In experiments on the MultiWOZ dataset, two practical transfer learning problems are investigated: 1) domain adaptation and 2) single-to-multiple domain transfer. We demonstrate that the proposed framework is highly effective in bootstrapping the performance of the two agents in transfer learning. 
We also show that our method leads to improvements in dialogue system performance on complete datasets.", "doc_id": "bd6bd34e25e0db9156931fa755af59c6", "publication_year": 2021, "sentences": ["one of the difficulties in training dialogue systems is the lack of training data .", "we explore the possibility of creating dialogue data through the interaction between a dialogue system and a user simulator .", "our goal is to develop a modelling framework that can incorporate new dialogue scenarios through self - play between the two agents .", "in this framework , we first pre - train the two agents on a collection of source domain dialogues , which equips the agents to converse with each other via natural language .", "with further fine - tuning on a small amount of target domain data , the agents continue to interact with the aim of improving their behaviors using reinforcement learning with structured reward functions .", "in experiments on the multiwoz dataset , two practical transfer learning problems are investigated : 1 ) domain adaptation and 2 ) single - to - multiple domain transfer .", "we demonstrate that the proposed framework is highly effective in bootstrapping the performance of the two agents in transfer learning .", "we also show that our method leads to improvements in dialogue system performance on complete datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "training dialogue systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["training", "dialogue", "systems"], "offsets": [5, 6, 7]}], "trigger": {"text": "data", "tokens": ["data"], "offsets": [13]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [15]}, {"text": "possibility of creating dialogue data", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["possibility", "of", "creating", "dialogue", "data"], "offsets": [18, 19, 20, 21, 22]}, {"text": "through the 
interaction between a dialogue system and a user simulator", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "the", "interaction", "between", "a", "dialogue", "system", "and", "a", "user", "simulator"], "offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [16]}}, {"event_type": "MDS", "arguments": [{"text": "two agents", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["two", "agents"], "offsets": [68, 69]}, {"text": "collection of source domain dialogues", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["collection", "of", "source", "domain", "dialogues"], "offsets": [72, 73, 74, 75, 76]}, {"text": "equips", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["equips"], "offsets": [79]}], "trigger": {"text": "pre - train", "tokens": ["pre", "-", "train"], "offsets": [64, 65, 66]}}, {"event_type": "PUR", "arguments": [{"text": "agents to converse with each other", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["agents", "to", "converse", "with", "each", "other"], "offsets": [81, 82, 83, 84, 85, 86]}, {"text": "via natural language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["via", "natural", "language"], "offsets": [87, 88, 89]}], "trigger": {"text": "equips", "tokens": ["equips"], "offsets": [79]}}, {"event_type": "MDS", "arguments": [{"text": "with further fine - tuning on a small amount of target domain data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "further", "fine", "-", "tuning", "on", "a", "small", "amount", "of", "target", "domain", "data"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101, 102, 103]}, {"text": "agents", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["agents"], "offsets": [106]}, {"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [114]}, {"text": 
"reinforcement learning with structured reward functions", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["reinforcement", "learning", "with", "structured", "reward", "functions"], "offsets": [118, 119, 120, 121, 122, 123]}], "trigger": {"text": "interact", "tokens": ["interact"], "offsets": [109]}}, {"event_type": "PUR", "arguments": [{"text": "behaviors", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["behaviors"], "offsets": [116]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [114]}}, {"event_type": "WKS", "arguments": [{"text": "multiwoz dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["multiwoz", "dataset"], "offsets": [129, 130]}, {"text": "domain adaptation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["domain", "adaptation"], "offsets": [142, 143]}, {"text": "single - to - multiple domain transfer", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["single", "-", "to", "-", "multiple", "domain", "transfer"], "offsets": [147, 148, 149, 150, 151, 152, 153]}], "trigger": {"text": "investigated", "tokens": ["investigated"], "offsets": [138]}}, {"event_type": "FAC", "arguments": [{"text": "proposed framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["proposed", "framework"], "offsets": [159, 160]}, {"text": "in transfer learning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "transfer", "learning"], "offsets": [172, 173, 174]}, {"text": "bootstrapping", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["bootstrapping"], "offsets": [165]}, {"text": "highly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["highly"], "offsets": [162]}], "trigger": {"text": "effective", "tokens": ["effective"], "offsets": [163]}}, {"event_type": "PUR", "arguments": [{"text": "performance of the two agents", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["performance", "of", "the", "two", "agents"], 
"offsets": [167, 168, 169, 170, 171]}], "trigger": {"text": "bootstrapping", "tokens": ["bootstrapping"], "offsets": [165]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [176]}, {"text": "improvements", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["improvements"], "offsets": [184]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [178]}}, {"event_type": "FAC", "arguments": [{"text": "on complete datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "complete", "datasets"], "offsets": [189, 190, 191]}, {"text": "dialogue system performance", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["dialogue", "system", "performance"], "offsets": [186, 187, 188]}, {"text": "method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["method"], "offsets": [181]}], "trigger": {"text": "improvements", "tokens": ["improvements"], "offsets": [184]}}], "document": ["one", "of", "the", "difficulties", "in", "training", "dialogue", "systems", "is", "the", "lack", "of", "training", "data", ".", "we", "explore", "the", "possibility", "of", "creating", "dialogue", "data", "through", "the", "interaction", "between", "a", "dialogue", "system", "and", "a", "user", "simulator", ".", "our", "goal", "is", "to", "develop", "a", "modelling", "framework", "that", "can", "incorporate", "new", "dialogue", "scenarios", "through", "self", "-", "play", "between", "the", "two", "agents", ".", "in", "this", "framework", ",", "we", "first", "pre", "-", "train", "the", "two", "agents", "on", "a", "collection", "of", "source", "domain", "dialogues", ",", "which", "equips", "the", "agents", "to", "converse", "with", "each", "other", "via", "natural", "language", ".", "with", "further", "fine", "-", "tuning", "on", "a", "small", "amount", "of", "target", "domain", "data", ",", "the", "agents", "continue", "to", "interact", "with", "the", "aim", 
"of", "improving", "their", "behaviors", "using", "reinforcement", "learning", "with", "structured", "reward", "functions", ".", "in", "experiments", "on", "the", "multiwoz", "dataset", ",", "two", "practical", "transfer", "learning", "problems", "are", "investigated", ":", "1", ")", "domain", "adaptation", "and", "2", ")", "single", "-", "to", "-", "multiple", "domain", "transfer", ".", "we", "demonstrate", "that", "the", "proposed", "framework", "is", "highly", "effective", "in", "bootstrapping", "the", "performance", "of", "the", "two", "agents", "in", "transfer", "learning", ".", "we", "also", "show", "that", "our", "method", "leads", "to", "improvements", "in", "dialogue", "system", "performance", "on", "complete", "datasets", "."]}, {"venue": "ACL", "title": "Correlating Neural and Symbolic Representations of Language", "abstract": "Analysis methods which enable us to better understand the representations and functioning of neural models of language are increasingly needed as deep learning becomes the dominant approach in NLP. Here we present two methods based on Representational Similarity Analysis (RSA) and Tree Kernels (TK) which allow us to directly quantify how strongly the information encoded in neural activation patterns corresponds to information represented by symbolic structures such as syntax trees. We first validate our methods on the case of a simple synthetic language for arithmetic expressions with clearly defined syntax and semantics, and show that they exhibit the expected pattern of results. 
We then our methods to correlate neural representations of English sentences with their constituency parse trees.", "doc_id": "5f8365de757e69bbad89d92604d06999", "publication_year": 2019, "sentences": ["analysis methods which enable us to better understand the representations and functioning of neural models of language are increasingly needed as deep learning becomes the dominant approach in nlp .", "here we present two methods based on representational similarity analysis ( rsa ) and tree kernels ( tk ) which allow us to directly quantify how strongly the information encoded in neural activation patterns corresponds to information represented by symbolic structures such as syntax trees .", "we first validate our methods on the case of a simple synthetic language for arithmetic expressions with clearly defined syntax and semantics , and show that they exhibit the expected pattern of results .", "we then our methods to correlate neural representations of english sentences with their constituency parse trees ."], "events": [{"event_type": "ITT", "arguments": [{"text": "analysis methods", "nugget_type": "APP", "argument_type": "Target", "tokens": ["analysis", "methods"], "offsets": [0, 1]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [31]}, {"text": "representational similarity analysis", "nugget_type": "APP", "argument_type": "Content", "tokens": ["representational", "similarity", "analysis"], "offsets": [37, 38, 39]}, {"text": "tree kernels", "nugget_type": "APP", "argument_type": "Content", "tokens": ["tree", "kernels"], "offsets": [44, 45]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [32]}}, {"event_type": "MDS", "arguments": [{"text": "information encoded in neural activation patterns", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["information", "encoded", "in", 
"neural", "activation", "patterns"], "offsets": [58, 59, 60, 61, 62, 63]}, {"text": "symbolic structures", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["symbolic", "structures"], "offsets": [69, 70]}], "trigger": {"text": "directly quantify", "tokens": ["directly", "quantify"], "offsets": [53, 54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "representational similarity analysis", "nugget_type": "APP", "argument_type": "Content", "tokens": ["representational", "similarity", "analysis"], "offsets": [37, 38, 39]}, {"text": "tree kernels", "nugget_type": "APP", "argument_type": "Content", "tokens": ["tree", "kernels"], "offsets": [44, 45]}, {"text": "on the case of a simple synthetic language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "case", "of", "a", "simple", "synthetic", "language"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88]}, {"text": "arithmetic expressions", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["arithmetic", "expressions"], "offsets": [90, 91]}], "trigger": {"text": "validate", "tokens": ["validate"], "offsets": [78]}}, {"event_type": "FIN", "arguments": [{"text": "exhibit", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["exhibit"], "offsets": [103]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [100]}}, {"event_type": "FAC", "arguments": [{"text": "representational similarity analysis", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["representational", "similarity", "analysis"], "offsets": [37, 38, 39]}, {"text": "tree kernels", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["tree", "kernels"], "offsets": [44, 45]}, {"text": "expected pattern of results", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["expected", "pattern", "of", "results"], "offsets": [105, 106, 107, 108]}], "trigger": {"text": 
"exhibit", "tokens": ["exhibit"], "offsets": [103]}}, {"event_type": "MDS", "arguments": [{"text": "neural representations of english sentences", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["neural", "representations", "of", "english", "sentences"], "offsets": [116, 117, 118, 119, 120]}, {"text": "constituency parse trees", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["constituency", "parse", "trees"], "offsets": [123, 124, 125]}], "trigger": {"text": "correlate", "tokens": ["correlate"], "offsets": [115]}}], "document": ["analysis", "methods", "which", "enable", "us", "to", "better", "understand", "the", "representations", "and", "functioning", "of", "neural", "models", "of", "language", "are", "increasingly", "needed", "as", "deep", "learning", "becomes", "the", "dominant", "approach", "in", "nlp", ".", "here", "we", "present", "two", "methods", "based", "on", "representational", "similarity", "analysis", "(", "rsa", ")", "and", "tree", "kernels", "(", "tk", ")", "which", "allow", "us", "to", "directly", "quantify", "how", "strongly", "the", "information", "encoded", "in", "neural", "activation", "patterns", "corresponds", "to", "information", "represented", "by", "symbolic", "structures", "such", "as", "syntax", "trees", ".", "we", "first", "validate", "our", "methods", "on", "the", "case", "of", "a", "simple", "synthetic", "language", "for", "arithmetic", "expressions", "with", "clearly", "defined", "syntax", "and", "semantics", ",", "and", "show", "that", "they", "exhibit", "the", "expected", "pattern", "of", "results", ".", "we", "then", "our", "methods", "to", "correlate", "neural", "representations", "of", "english", "sentences", "with", "their", "constituency", "parse", "trees", "."]}, {"venue": "ACL", "title": "From Zero to Hero: Human-In-The-Loop Entity Linking in Low Resource Domains", "abstract": "Entity linking (EL) is concerned with disambiguating entity mentions in a text against knowledge bases (KB). 
It is crucial in a considerable number of fields like humanities, technical writing and biomedical sciences to enrich texts with semantics and discover more knowledge. The use of EL in such domains requires handling noisy texts, low resource settings and domain-specific KBs. Existing approaches are mostly inappropriate for this, as they depend on training data. However, in the above scenario, there exists hardly annotated data, and it needs to be created from scratch. We therefore present a novel domain-agnostic Human-In-The-Loop annotation approach: we use recommenders that suggest potential concepts and adaptive candidate ranking, thereby speeding up the overall annotation process and making it less tedious for users. We evaluate our ranking approach in a simulation on difficult texts and show that it greatly outperforms a strong baseline in ranking accuracy. In a user study, the annotation speed improves by 35% compared to annotating without interactive support; users report that they strongly prefer our system. 
An open-source and ready-to-use implementation based on the text annotation platform INCEpTION (https://inception-project.github.io) is made available.", "doc_id": "12fc30628e4204957fd76ad85458134d", "publication_year": 2020, "sentences": ["entity linking ( el ) is concerned with disambiguating entity mentions in a text against knowledge bases ( kb ) .", "it is crucial in a considerable number of fields like humanities , technical writing and biomedical sciences to enrich texts with semantics and discover more knowledge .", "the use of el in such domains requires handling noisy texts , low resource settings and domain - specific kbs .", "existing approaches are mostly inappropriate for this , as they depend on training data .", "however , in the above scenario , there exists hardly annotated data , and it needs to be created from scratch .", "we therefore present a novel domain - agnostic human - in - the - loop annotation approach : we use recommenders that suggest potential concepts and adaptive candidate ranking , thereby speeding up the overall annotation process and making it less tedious for users .", "we evaluate our ranking approach in a simulation on difficult texts and show that it greatly outperforms a strong baseline in ranking accuracy .", "in a user study , the annotation speed improves by 35 % compared to annotating without interactive support ; users report that they strongly prefer our system .", "an open - source and ready - to - use implementation based on the text annotation platform inception ( https : / / inception - project . github . 
io ) is made available ."], "events": [{"event_type": "ITT", "arguments": [{"text": "entity linking", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "linking"], "offsets": [0, 1]}], "trigger": {"text": "concerned", "tokens": ["concerned"], "offsets": [6]}}, {"event_type": "RWF", "arguments": [{"text": "hardly annotated data", "nugget_type": "DST", "argument_type": "Fault", "tokens": ["hardly", "annotated", "data"], "offsets": [93, 94, 95]}], "trigger": {"text": "exists", "tokens": ["exists"], "offsets": [92]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [106]}, {"text": "domain - agnostic human - in - the - loop annotation approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["domain", "-", "agnostic", "human", "-", "in", "-", "the", "-", "loop", "annotation", "approach"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [108]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [124]}, {"text": "potential concepts", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["potential", "concepts"], "offsets": [129, 130]}, {"text": "adaptive candidate ranking", "nugget_type": "APP", "argument_type": "Content", "tokens": ["adaptive", "candidate", "ranking"], "offsets": [132, 133, 134]}, {"text": "speeding up", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["speeding", "up"], "offsets": [137, 138]}, {"text": "making", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["making"], "offsets": [144]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [128]}}, {"event_type": "PUR", "arguments": [{"text": "overall annotation process", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["overall", "annotation", "process"], 
"offsets": [140, 141, 142]}], "trigger": {"text": "speeding up", "tokens": ["speeding", "up"], "offsets": [137, 138]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [151]}, {"text": "in a simulation on difficult texts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "simulation", "on", "difficult", "texts"], "offsets": [156, 157, 158, 159, 160, 161]}, {"text": "domain - agnostic human - in - the - loop annotation approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["domain", "-", "agnostic", "human", "-", "in", "-", "the", "-", "loop", "annotation", "approach"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]}], "trigger": {"text": "evaluate", "tokens": ["evaluate"], "offsets": [152]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [151]}, {"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [167]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [163]}}, {"event_type": "CMP", "arguments": [{"text": "domain - agnostic human - in - the - loop annotation approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["domain", "-", "agnostic", "human", "-", "in", "-", "the", "-", "loop", "annotation", "approach"], "offsets": [111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122]}, {"text": "greatly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["greatly"], "offsets": [166]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [167]}, {"text": "strong baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baseline"], "offsets": [169, 170]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [167]}}, {"event_type": "CMP", 
"arguments": [{"text": "35 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["35", "%"], "offsets": [185, 186]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [183]}, {"text": "without interactive support", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "interactive", "support"], "offsets": [190, 191, 192]}, {"text": "annotating", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["annotating"], "offsets": [189]}], "trigger": {"text": "compared", "tokens": ["compared"], "offsets": [187]}}, {"event_type": "PUR", "arguments": [{"text": "less tedious for users", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["less", "tedious", "for", "users"], "offsets": [146, 147, 148, 149]}], "trigger": {"text": "making", "tokens": ["making"], "offsets": [144]}}], "document": ["entity", "linking", "(", "el", ")", "is", "concerned", "with", "disambiguating", "entity", "mentions", "in", "a", "text", "against", "knowledge", "bases", "(", "kb", ")", ".", "it", "is", "crucial", "in", "a", "considerable", "number", "of", "fields", "like", "humanities", ",", "technical", "writing", "and", "biomedical", "sciences", "to", "enrich", "texts", "with", "semantics", "and", "discover", "more", "knowledge", ".", "the", "use", "of", "el", "in", "such", "domains", "requires", "handling", "noisy", "texts", ",", "low", "resource", "settings", "and", "domain", "-", "specific", "kbs", ".", "existing", "approaches", "are", "mostly", "inappropriate", "for", "this", ",", "as", "they", "depend", "on", "training", "data", ".", "however", ",", "in", "the", "above", "scenario", ",", "there", "exists", "hardly", "annotated", "data", ",", "and", "it", "needs", "to", "be", "created", "from", "scratch", ".", "we", "therefore", "present", "a", "novel", "domain", "-", "agnostic", "human", "-", "in", "-", "the", "-", "loop", "annotation", "approach", ":", "we", "use", "recommenders", "that", "suggest", 
"potential", "concepts", "and", "adaptive", "candidate", "ranking", ",", "thereby", "speeding", "up", "the", "overall", "annotation", "process", "and", "making", "it", "less", "tedious", "for", "users", ".", "we", "evaluate", "our", "ranking", "approach", "in", "a", "simulation", "on", "difficult", "texts", "and", "show", "that", "it", "greatly", "outperforms", "a", "strong", "baseline", "in", "ranking", "accuracy", ".", "in", "a", "user", "study", ",", "the", "annotation", "speed", "improves", "by", "35", "%", "compared", "to", "annotating", "without", "interactive", "support", ";", "users", "report", "that", "they", "strongly", "prefer", "our", "system", ".", "an", "open", "-", "source", "and", "ready", "-", "to", "-", "use", "implementation", "based", "on", "the", "text", "annotation", "platform", "inception", "(", "https", ":", "/", "/", "inception", "-", "project", ".", "github", ".", "io", ")", "is", "made", "available", "."]}, {"venue": "ACL", "title": "TableFormer: Robust Transformer Modeling for Table-Text Encoding", "abstract": "Understanding tables is an important aspect of natural language understanding. Existing models for table understanding require linearization of the table structure, where row or column order is encoded as an unwanted bias. Such spurious biases make the model vulnerable to row and column order perturbations. Additionally, prior work has not thoroughly modeled the table structures or table-text alignments, hindering the table-text understanding ability. In this work, we propose a robust and structurally aware table-text encoding architecture TableFormer, where tabular structural biases are incorporated completely through learnable attention biases. TableFormer is (1) strictly invariant to row and column orders, and, (2) could understand tables better due to its tabular inductive biases. 
Our evaluations showed that TableFormer outperforms strong baselines in all settings on SQA, WTQ and TabFact table reasoning datasets, and achieves state-of-the-art performance on SQA, especially when facing answer-invariant row and column order perturbations (6% improvement over the best baseline), because previous SOTA models\u2019 performance drops by 4% - 6% when facing such perturbations while TableFormer is not affected.", "doc_id": "1199c015d03a86a52683fcec3cb567c4", "publication_year": 2022, "sentences": ["understanding tables is an important aspect of natural language understanding .", "existing models for table understanding require linearization of the table structure , where row or column order is encoded as an unwanted bias .", "such spurious biases make the model vulnerable to row and column order perturbations .", "additionally , prior work has not thoroughly modeled the table structures or table - text alignments , hindering the table - text understanding ability .", "in this work , we propose a robust and structurally aware table - text encoding architecture tableformer , where tabular structural biases are incorporated completely through learnable attention biases .", "tableformer is ( 1 ) strictly invariant to row and column orders , and , ( 2 ) could understand tables better due to its tabular inductive biases .", "our evaluations showed that tableformer outperforms strong baselines in all settings on sqa , wtq and tabfact table reasoning datasets , and achieves state - of - the - art performance on sqa , especially when facing answer - invariant row and column order perturbations ( 6 % improvement over the best baseline ) , because previous sota models \u2019 performance drops by 4 % - 6 % when facing such perturbations while tableformer is not affected ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language understanding", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", 
"understanding"], "offsets": [7, 8, 9]}], "trigger": {"text": "aspect", "tokens": ["aspect"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "unwanted bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unwanted", "bias"], "offsets": [32, 33]}, {"text": "row order", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["row", "order"], "offsets": [24, 27]}, {"text": "column order", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["column", "order"], "offsets": [26, 27]}], "trigger": {"text": "encoded", "tokens": ["encoded"], "offsets": [29]}}, {"event_type": "RWF", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["model"], "offsets": [40]}, {"text": "vulnerable", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["vulnerable"], "offsets": [41]}, {"text": "row order perturbations", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["row", "order", "perturbations"], "offsets": [43, 46, 47]}], "trigger": {"text": "vulnerable", "tokens": ["vulnerable"], "offsets": [41]}}, {"event_type": "RWF", "arguments": [{"text": "prior work", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["prior", "work"], "offsets": [51, 52]}, {"text": "not thoroughly modeled", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "thoroughly", "modeled"], "offsets": [54, 55, 56]}, {"text": "table structures", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["table", "structures"], "offsets": [58, 59]}], "trigger": {"text": "not thoroughly modeled", "tokens": ["not", "thoroughly", "modeled"], "offsets": [54, 55, 56]}}, {"event_type": "RWF", "arguments": [{"text": "hindering", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hindering"], "offsets": [66]}], "trigger": {"text": "hindering", "tokens": ["hindering"], "offsets": [66]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": 
["we"], "offsets": [78]}, {"text": "robust and structurally aware table - text encoding architecture tableformer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["robust", "and", "structurally", "aware", "table", "-", "text", "encoding", "architecture", "tableformer"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88, 89, 90]}, {"text": "incorporated", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["incorporated"], "offsets": [97]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [79]}}, {"event_type": "PUR", "arguments": [{"text": "tabular structural biases", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["tabular", "structural", "biases"], "offsets": [93, 94, 95]}, {"text": "through learnable attention biases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "learnable", "attention", "biases"], "offsets": [99, 100, 101, 102]}], "trigger": {"text": "incorporated", "tokens": ["incorporated"], "offsets": [97]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [138]}, {"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [155]}, {"text": "not affected", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["not", "affected"], "offsets": [208, 209]}], "trigger": {"text": "showed", "tokens": ["showed"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "tableformer", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["tableformer"], "offsets": [137]}, {"text": "state - of - the - art performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "performance"], "offsets": [156, 157, 158, 159, 160, 161, 162, 163]}, {"text": "sqa", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["sqa"], "offsets": [165]}], "trigger": {"text": "achieves", 
"tokens": ["achieves"], "offsets": [155]}}, {"event_type": "FAC", "arguments": [{"text": "when facing such perturbations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "facing", "such", "perturbations"], "offsets": [201, 202, 203, 204]}, {"text": "tableformer", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["tableformer"], "offsets": [206]}], "trigger": {"text": "not affected", "tokens": ["not", "affected"], "offsets": [208, 209]}}, {"event_type": "CMP", "arguments": [{"text": "tableformer", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["tableformer"], "offsets": [137]}, {"text": "in all settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "all", "settings"], "offsets": [141, 142, 143]}, {"text": "sqa", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["sqa"], "offsets": [145]}, {"text": "wtq", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["wtq"], "offsets": [147]}, {"text": "tabfact table reasoning datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["tabfact", "table", "reasoning", "datasets"], "offsets": [149, 150, 151, 152]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [138]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [138]}}], "document": ["understanding", "tables", "is", "an", "important", "aspect", "of", "natural", "language", "understanding", ".", "existing", "models", "for", "table", "understanding", "require", "linearization", "of", "the", "table", "structure", ",", "where", "row", "or", "column", "order", "is", "encoded", "as", "an", "unwanted", "bias", ".", "such", "spurious", "biases", "make", "the", "model", "vulnerable", "to", "row", "and", "column", "order", "perturbations", ".", "additionally", ",", "prior", "work", "has", "not", "thoroughly", "modeled", "the", "table", "structures", "or", "table", "-", "text", "alignments", ",", 
"hindering", "the", "table", "-", "text", "understanding", "ability", ".", "in", "this", "work", ",", "we", "propose", "a", "robust", "and", "structurally", "aware", "table", "-", "text", "encoding", "architecture", "tableformer", ",", "where", "tabular", "structural", "biases", "are", "incorporated", "completely", "through", "learnable", "attention", "biases", ".", "tableformer", "is", "(", "1", ")", "strictly", "invariant", "to", "row", "and", "column", "orders", ",", "and", ",", "(", "2", ")", "could", "understand", "tables", "better", "due", "to", "its", "tabular", "inductive", "biases", ".", "our", "evaluations", "showed", "that", "tableformer", "outperforms", "strong", "baselines", "in", "all", "settings", "on", "sqa", ",", "wtq", "and", "tabfact", "table", "reasoning", "datasets", ",", "and", "achieves", "state", "-", "of", "-", "the", "-", "art", "performance", "on", "sqa", ",", "especially", "when", "facing", "answer", "-", "invariant", "row", "and", "column", "order", "perturbations", "(", "6", "%", "improvement", "over", "the", "best", "baseline", ")", ",", "because", "previous", "sota", "models", "\u2019", "performance", "drops", "by", "4", "%", "-", "6", "%", "when", "facing", "such", "perturbations", "while", "tableformer", "is", "not", "affected", "."]}, {"venue": "ACL", "title": "ExtEnD: Extractive Entity Disambiguation", "abstract": "Local models for Entity Disambiguation (ED) have today become extremely powerful, in most part thanks to the advent of large pre-trained language models. However, despite their significant performance achievements, most of these approaches frame ED through classification formulations that have intrinsic limitations, both computationally and from a modeling perspective. In contrast with this trend, here we propose ExtEnD, a novel local formulation for ED where we frame this task as a text extraction problem, and present two Transformer-based architectures that implement it. 
Based on experiments in and out of domain, and training over two different data regimes, we find our approach surpasses all its competitors in terms of both data efficiency and raw performance. ExtEnD outperforms its alternatives by as few as 6 F1 points on the more constrained of the two data regimes and, when moving to the other higher-resourced regime, sets a new state of the art on 4 out of 4 benchmarks under consideration, with average improvements of 0.7 F1 points overall and 1.1 F1 points out of domain. In addition, to gain better insights from our results, we also perform a fine-grained evaluation of our performances on different classes of label frequency, along with an ablation study of our architectural choices and an error analysis. We release our code and models for research purposes at https://github.com/SapienzaNLP/extend.", "doc_id": "3370564fa51dd1ecf0517ea07d4aa57d", "publication_year": 2022, "sentences": ["local models for entity disambiguation ( ed ) have today become extremely powerful , in most part thanks to the advent of large pre - trained language models .", "however , despite their significant performance achievements , most of these approaches frame ed through classification formulations that have intrinsic limitations , both computationally and from a modeling perspective .", "in contrast with this trend , here we propose extend , a novel local formulation for ed where we frame this task as a text extraction problem , and present two transformer - based architectures that implement it .", "based on experiments in and out of domain , and training over two different data regimes , we find our approach surpasses all its competitors in terms of both data efficiency and raw performance . 
extend", "outperforms its alternatives by as few as 6 f1 points on the more constrained of the two data regimes and , when moving to the other higher - resourced regime , sets a new state of the art on 4 out of 4 benchmarks under consideration , with average improvements of 0 . 7 f1 points overall and 1 . 1 f1 points out of domain .", "in addition , to gain better insights from our results , we also perform a fine - grained evaluation of our performances on different classes of label frequency , along with an ablation study of our architectural choices and an error analysis .", "we release our code and models for research purposes at https : / / github . com / sapienzanlp / extend ."], "events": [{"event_type": "ITT", "arguments": [{"text": "entity disambiguation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "disambiguation"], "offsets": [3, 4]}], "trigger": {"text": "become", "tokens": ["become"], "offsets": [10]}}, {"event_type": "RWF", "arguments": [{"text": "intrinsic limitations", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["intrinsic", "limitations"], "offsets": [48, 49]}, {"text": "classification formulations", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["classification", "formulations"], "offsets": [44, 45]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [47]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "local formulation", "nugget_type": "APP", "argument_type": "Content", "tokens": ["local", "formulation"], "offsets": [72, 73]}, {"text": "ed", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entity", "disambiguation"], "offsets": [3, 4]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [67]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [66]}, {"text": "two transformer 
- based architectures", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "transformer", "-", "based", "architectures"], "offsets": [89, 90, 91, 92, 93]}, {"text": "implement", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["implement"], "offsets": [95]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "it", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["text", "extraction", "problem"], "offsets": [83, 84, 85]}], "trigger": {"text": "implement", "tokens": ["implement"], "offsets": [95]}}, {"event_type": "CMP", "arguments": [{"text": "competitors", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["competitors"], "offsets": [122]}, {"text": "data efficiency and raw performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["data", "efficiency", "and", "raw", "performance"], "offsets": [127, 128, 129, 130, 131]}, {"text": "in and out of domain", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "and", "out", "of", "domain"], "offsets": [101, 102, 103, 104, 105]}, {"text": "training over two different data regimes", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["training", "over", "two", "different", "data", "regimes"], "offsets": [108, 109, 110, 111, 112, 113]}, {"text": "local formulation", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["local", "formulation"], "offsets": [72, 73]}], "trigger": {"text": "surpasses", "tokens": ["surpasses"], "offsets": [119]}}, {"event_type": "CMP", "arguments": [{"text": "extend", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["extend"], "offsets": [133]}, {"text": "alternatives", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["alternatives"], "offsets": [136]}, {"text": "6", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["6"], "offsets": [141]}, {"text": "f1 points", "nugget_type": "TAK", "argument_type": 
"Metrics", "tokens": ["f1", "points"], "offsets": [142, 143]}, {"text": "as few as", "nugget_type": "STR", "argument_type": "Result", "tokens": ["as", "few", "as"], "offsets": [138, 139, 140]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [134]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [134]}}, {"event_type": "FAC", "arguments": [{"text": "when moving to the other higher - resourced regime", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "moving", "to", "the", "other", "higher", "-", "resourced", "regime"], "offsets": [155, 156, 157, 158, 159, 160, 161, 162, 163]}, {"text": "extend", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["extend"], "offsets": [133]}, {"text": "new state of the art", "nugget_type": "STR", "argument_type": "Object", "tokens": ["new", "state", "of", "the", "art"], "offsets": [167, 168, 169, 170, 171]}, {"text": "4 out of 4 benchmarks under consideration", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["4", "out", "of", "4", "benchmarks", "under", "consideration"], "offsets": [173, 174, 175, 176, 177, 178, 179]}, {"text": "average improvements", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["average", "improvements"], "offsets": [182, 183]}, {"text": "0 . 7", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["0", ".", "7"], "offsets": [185, 186, 187]}, {"text": "f1 points overall", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1", "points", "overall"], "offsets": [188, 189, 190]}, {"text": "1 . 
1", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["1", ".", "1"], "offsets": [192, 193, 194]}, {"text": "f1 points out of domain", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["f1", "points", "out", "of", "domain"], "offsets": [195, 196, 197, 198, 199]}], "trigger": {"text": "sets", "tokens": ["sets"], "offsets": [165]}}, {"event_type": "WKS", "arguments": [{"text": "gain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["gain"], "offsets": [205]}, {"text": "fine - grained evaluation of our performances", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["fine", "-", "grained", "evaluation", "of", "our", "performances"], "offsets": [216, 217, 218, 219, 220, 221, 222]}, {"text": "different classes of label frequency", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["different", "classes", "of", "label", "frequency"], "offsets": [224, 225, 226, 227, 228]}, {"text": "ablation study of our architectural choices", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["ablation", "study", "of", "our", "architectural", "choices"], "offsets": [233, 234, 235, 236, 237, 238]}, {"text": "error analysis", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["error", "analysis"], "offsets": [241, 242]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [212]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [214]}}, {"event_type": "PUR", "arguments": [{"text": "better insights", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["better", "insights"], "offsets": [206, 207]}, {"text": "from our results", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "our", "results"], "offsets": [208, 209, 210]}], "trigger": {"text": "gain", "tokens": ["gain"], "offsets": [205]}}], "document": ["local", "models", "for", "entity", "disambiguation", "(", "ed", ")", "have", "today", "become", "extremely", "powerful", ",", 
"in", "most", "part", "thanks", "to", "the", "advent", "of", "large", "pre", "-", "trained", "language", "models", ".", "however", ",", "despite", "their", "significant", "performance", "achievements", ",", "most", "of", "these", "approaches", "frame", "ed", "through", "classification", "formulations", "that", "have", "intrinsic", "limitations", ",", "both", "computationally", "and", "from", "a", "modeling", "perspective", ".", "in", "contrast", "with", "this", "trend", ",", "here", "we", "propose", "extend", ",", "a", "novel", "local", "formulation", "for", "ed", "where", "we", "frame", "this", "task", "as", "a", "text", "extraction", "problem", ",", "and", "present", "two", "transformer", "-", "based", "architectures", "that", "implement", "it", ".", "based", "on", "experiments", "in", "and", "out", "of", "domain", ",", "and", "training", "over", "two", "different", "data", "regimes", ",", "we", "find", "our", "approach", "surpasses", "all", "its", "competitors", "in", "terms", "of", "both", "data", "efficiency", "and", "raw", "performance", ".", "extend", "outperforms", "its", "alternatives", "by", "as", "few", "as", "6", "f1", "points", "on", "the", "more", "constrained", "of", "the", "two", "data", "regimes", "and", ",", "when", "moving", "to", "the", "other", "higher", "-", "resourced", "regime", ",", "sets", "a", "new", "state", "of", "the", "art", "on", "4", "out", "of", "4", "benchmarks", "under", "consideration", ",", "with", "average", "improvements", "of", "0", ".", "7", "f1", "points", "overall", "and", "1", ".", "1", "f1", "points", "out", "of", "domain", ".", "in", "addition", ",", "to", "gain", "better", "insights", "from", "our", "results", ",", "we", "also", "perform", "a", "fine", "-", "grained", "evaluation", "of", "our", "performances", "on", "different", "classes", "of", "label", "frequency", ",", "along", "with", "an", "ablation", "study", "of", "our", "architectural", "choices", "and", "an", "error", "analysis", ".", "we", "release", "our", 
"code", "and", "models", "for", "research", "purposes", "at", "https", ":", "/", "/", "github", ".", "com", "/", "sapienzanlp", "/", "extend", "."]}, {"venue": "ACL", "title": "Translationese as a Language in \u201cMultilingual\u201d NMT", "abstract": "Machine translation has an undesirable propensity to produce \u201ctranslationese\u201d artifacts, which can lead to higher BLEU scores while being liked less by human raters. Motivated by this, we model translationese and original (i.e. natural) text as separate languages in a multilingual model, and pose the question: can we perform zero-shot translation between original source text and original target text? There is no data with original source and original target, so we train a sentence-level classifier to distinguish translationese from original target text, and use this classifier to tag the training data for an NMT model. Using this technique we bias the model to produce more natural outputs at test time, yielding gains in human evaluation scores on both accuracy and fluency. Additionally, we demonstrate that it is possible to bias the model to produce translationese and game the BLEU score, increasing it while decreasing human-rated quality. We analyze these outputs using metrics measuring the degree of translationese, and present an analysis of the volatility of heuristic-based train-data tagging.", "doc_id": "f77e2f1a711f5545b43b227ec7eee1c7", "publication_year": 2020, "sentences": ["machine translation has an undesirable propensity to produce \u201c translationese \u201d artifacts , which can lead to higher bleu scores while being liked less by human raters .", "motivated by this , we model translationese and original ( i . e . 
natural ) text as separate languages in a multilingual model , and pose the question : can we perform zero - shot translation between original source text and original target text ?", "there is no data with original source and original target , so we train a sentence - level classifier to distinguish translationese from original target text , and use this classifier to tag the training data for an nmt model .", "using this technique we bias the model to produce more natural outputs at test time , yielding gains in human evaluation scores on both accuracy and fluency .", "additionally , we demonstrate that it is possible to bias the model to produce translationese and game the bleu score , increasing it while decreasing human - rated quality .", "we analyze these outputs using metrics measuring the degree of translationese , and present an analysis of the volatility of heuristic - based train - data tagging ."], "events": [{"event_type": "RWF", "arguments": [{"text": "\u201c translationese \u201d artifacts", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["\u201c", "translationese", "\u201d", "artifacts"], "offsets": [8, 9, 10, 11]}, {"text": "machine translation", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["machine", "translation"], "offsets": [0, 1]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [7]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [32]}, {"text": "separate languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["separate", "languages"], "offsets": [46, 47]}, {"text": "in a multilingual model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "multilingual", "model"], "offsets": [48, 49, 50, 51]}, {"text": "original ( i . e . 
natural ) text", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["original", "text"], "offsets": [36, 44]}, {"text": "translationese", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["translationese"], "offsets": [34]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [33]}}, {"event_type": "MDS", "arguments": [{"text": "distinguish", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["distinguish"], "offsets": [94]}, {"text": "sentence - level classifier", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["sentence", "-", "level", "classifier"], "offsets": [89, 90, 91, 92]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "translationese", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["translationese"], "offsets": [95]}, {"text": "from original target text", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "original", "target", "text"], "offsets": [96, 97, 98, 99]}], "trigger": {"text": "distinguish", "tokens": ["distinguish"], "offsets": [94]}}, {"event_type": "MDS", "arguments": [{"text": "sentence - level classifier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["sentence", "-", "level", "classifier"], "offsets": [89, 90, 91, 92]}, {"text": "tag", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["tag"], "offsets": [106]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [102]}}, {"event_type": "PUR", "arguments": [{"text": "training data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["training", "data"], "offsets": [108, 109]}, {"text": "nmt model", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["nmt", "model"], "offsets": [112, 113]}], "trigger": {"text": "tag", "tokens": ["tag"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "using this technique", "nugget_type": "LIM", "argument_type": "Condition", "tokens": 
["using", "this", "technique"], "offsets": [115, 116, 117]}, {"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [121]}, {"text": "more natural outputs", "nugget_type": "STR", "argument_type": "Object", "tokens": ["more", "natural", "outputs"], "offsets": [124, 125, 126]}, {"text": "at test time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "test", "time"], "offsets": [127, 128, 129]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [123]}}, {"event_type": "FAC", "arguments": [{"text": "using this technique", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "this", "technique"], "offsets": [115, 116, 117]}, {"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [121]}, {"text": "gains in human evaluation scores", "nugget_type": "STR", "argument_type": "Object", "tokens": ["gains", "in", "human", "evaluation", "scores"], "offsets": [132, 133, 134, 135, 136]}, {"text": "on both accuracy and fluency", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "both", "accuracy", "and", "fluency"], "offsets": [137, 138, 139, 140, 141]}], "trigger": {"text": "yielding", "tokens": ["yielding"], "offsets": [131]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [173]}, {"text": "metrics measuring the degree of translationese", "nugget_type": "APP", "argument_type": "Content", "tokens": ["metrics", "measuring", "the", "degree", "of", "translationese"], "offsets": [178, 179, 180, 181, 182, 183]}, {"text": "analyze", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["analyze"], "offsets": [174]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [177]}}, {"event_type": "PUR", "arguments": [{"text": "outputs", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["outputs"], "offsets": [176]}], 
"trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [174]}}, {"event_type": "RWF", "arguments": [{"text": "higher bleu scores", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["higher", "bleu", "scores"], "offsets": [17, 18, 19]}, {"text": "\u201c translationese \u201d artifacts", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["\u201c", "translationese", "\u201d", "artifacts"], "offsets": [8, 9, 10, 11]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [15]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [173]}, {"text": "volatility of heuristic - based train - data tagging", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["volatility", "of", "heuristic", "-", "based", "train", "-", "data", "tagging"], "offsets": [191, 192, 193, 194, 195, 196, 197, 198, 199]}], "trigger": {"text": "analysis", "tokens": ["analysis"], "offsets": [188]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [145]}, {"text": "possible to bias", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["possible", "to", "bias"], "offsets": [150, 151, 152]}, {"text": "increasing", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["increasing"], "offsets": [164]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [146]}}, {"event_type": "FAC", "arguments": [{"text": "model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model"], "offsets": [154]}, {"text": "produce", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["produce"], "offsets": [156]}, {"text": "game", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["game"], "offsets": [159]}], "trigger": {"text": "possible to bias", "tokens": ["possible", "to", "bias"], "offsets": [150, 151, 152]}}, {"event_type": "PUR", "arguments": 
[{"text": "bleu score", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["bleu", "score"], "offsets": [161, 162]}], "trigger": {"text": "game", "tokens": ["game"], "offsets": [159]}}, {"event_type": "CMP", "arguments": [{"text": "while decreasing human - rated quality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "decreasing", "human", "-", "rated", "quality"], "offsets": [166, 167, 168, 169, 170, 171]}, {"text": "model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["model"], "offsets": [154]}], "trigger": {"text": "increasing", "tokens": ["increasing"], "offsets": [164]}}, {"event_type": "PUR", "arguments": [{"text": "translationese", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["translationese"], "offsets": [157]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [156]}}], "document": ["machine", "translation", "has", "an", "undesirable", "propensity", "to", "produce", "\u201c", "translationese", "\u201d", "artifacts", ",", "which", "can", "lead", "to", "higher", "bleu", "scores", "while", "being", "liked", "less", "by", "human", "raters", ".", "motivated", "by", "this", ",", "we", "model", "translationese", "and", "original", "(", "i", ".", "e", ".", "natural", ")", "text", "as", "separate", "languages", "in", "a", "multilingual", "model", ",", "and", "pose", "the", "question", ":", "can", "we", "perform", "zero", "-", "shot", "translation", "between", "original", "source", "text", "and", "original", "target", "text", "?", "there", "is", "no", "data", "with", "original", "source", "and", "original", "target", ",", "so", "we", "train", "a", "sentence", "-", "level", "classifier", "to", "distinguish", "translationese", "from", "original", "target", "text", ",", "and", "use", "this", "classifier", "to", "tag", "the", "training", "data", "for", "an", "nmt", "model", ".", "using", "this", "technique", "we", "bias", "the", "model", "to", "produce", "more", "natural", "outputs", "at", "test", 
"time", ",", "yielding", "gains", "in", "human", "evaluation", "scores", "on", "both", "accuracy", "and", "fluency", ".", "additionally", ",", "we", "demonstrate", "that", "it", "is", "possible", "to", "bias", "the", "model", "to", "produce", "translationese", "and", "game", "the", "bleu", "score", ",", "increasing", "it", "while", "decreasing", "human", "-", "rated", "quality", ".", "we", "analyze", "these", "outputs", "using", "metrics", "measuring", "the", "degree", "of", "translationese", ",", "and", "present", "an", "analysis", "of", "the", "volatility", "of", "heuristic", "-", "based", "train", "-", "data", "tagging", "."]}, {"venue": "ACL", "title": "Extractive Summarization as Text Matching", "abstract": "This paper creates a paradigm shift with regard to the way we build neural extractive summarization systems. Instead of following the commonly used framework of extracting sentences individually and modeling the relationship between sentences, we formulate the extractive summarization task as a semantic text matching problem, in which a source document and candidate summaries will be (extracted from the original text) matched in a semantic space. Notably, this paradigm shift to semantic matching framework is well-grounded in our comprehensive analysis of the inherent gap between sentence-level and summary-level extractors based on the property of the dataset. Besides, even instantiating the framework with a simple form of a matching model, we have driven the state-of-the-art extractive result on CNN/DailyMail to a new level (44.41 in ROUGE-1). Experiments on the other five datasets also show the effectiveness of the matching framework. We believe the power of this matching-based summarization framework has not been fully exploited. 
To encourage more instantiations in the future, we have released our codes, processed dataset, as well as generated summaries in https://github.com/maszhongming/MatchSum.", "doc_id": "2a01133f313a4d08b43688510781b053", "publication_year": 2020, "sentences": ["this paper creates a paradigm shift with regard to the way we build neural extractive summarization systems .", "instead of following the commonly used framework of extracting sentences individually and modeling the relationship between sentences , we formulate the extractive summarization task as a semantic text matching problem , in which a source document and candidate summaries will be ( extracted from the original text ) matched in a semantic space .", "notably , this paradigm shift to semantic matching framework is well - grounded in our comprehensive analysis of the inherent gap between sentence - level and summary - level extractors based on the property of the dataset .", "besides , even instantiating the framework with a simple form of a matching model , we have driven the state - of - the - art extractive result on cnn / dailymail to a new level ( 44 . 41 in rouge - 1 ) .", "experiments on the other five datasets also show the effectiveness of the matching framework .", "we believe the power of this matching - based summarization framework has not been fully exploited .", "to encourage more instantiations in the future , we have released our codes , processed dataset , as well as generated summaries in https : / / github . 
com / maszhongming / matchsum ."], "events": [{"event_type": "PRP", "arguments": [{"text": "paradigm shift", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["paradigm", "shift"], "offsets": [4, 5]}, {"text": "build", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["build"], "offsets": [12]}], "trigger": {"text": "creates", "tokens": ["creates"], "offsets": [2]}}, {"event_type": "PUR", "arguments": [{"text": "neural extractive summarization systems", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["neural", "extractive", "summarization", "systems"], "offsets": [13, 14, 15, 16]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [12]}}, {"event_type": "RWS", "arguments": [{"text": "commonly used framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["commonly", "used", "framework"], "offsets": [22, 23, 24]}, {"text": "sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sentences"], "offsets": [27]}], "trigger": {"text": "extracting", "tokens": ["extracting"], "offsets": [26]}}, {"event_type": "RWS", "arguments": [{"text": "commonly used framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["commonly", "used", "framework"], "offsets": [22, 23, 24]}, {"text": "relationship between sentences", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relationship", "between", "sentences"], "offsets": [32, 33, 34]}], "trigger": {"text": "modeling", "tokens": ["modeling"], "offsets": [30]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [36]}, {"text": "extractive summarization task", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extractive", "summarization", "task"], "offsets": [39, 40, 41]}, {"text": "semantic text matching problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["semantic", "text", "matching", "problem"], "offsets": 
[44, 45, 46, 47]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [37]}}, {"event_type": "MDS", "arguments": [{"text": "in a semantic space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "semantic", "space"], "offsets": [67, 68, 69, 70]}, {"text": "source document", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["source", "document"], "offsets": [52, 53]}, {"text": "candidate summaries", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["candidate", "summaries"], "offsets": [55, 56]}], "trigger": {"text": "matched", "tokens": ["matched"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "inherent gap between sentence - level and summary - level extractors", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["inherent", "gap", "between", "sentence", "-", "level", "and", "summary", "-", "level", "extractors"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99, 100, 101]}, {"text": "property of the dataset", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["property", "of", "the", "dataset"], "offsets": [105, 106, 107, 108]}], "trigger": {"text": "analysis", "tokens": ["analysis"], "offsets": [88]}}, {"event_type": "MDS", "arguments": [{"text": "simple form of a matching model", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["simple", "form", "of", "a", "matching", "model"], "offsets": [118, 119, 120, 121, 122, 123]}, {"text": "framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["framework"], "offsets": [115]}], "trigger": {"text": "instantiating", "tokens": ["instantiating"], "offsets": [113]}}, {"event_type": "CMP", "arguments": [{"text": "on cnn / dailymail", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "cnn", "/", "dailymail"], "offsets": [138, 139, 140, 141]}, {"text": "state - of - the - art extractive result", "nugget_type": "TAK", "argument_type": 
"Metrics", "tokens": ["state", "-", "of", "-", "the", "-", "art", "extractive", "result"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137]}, {"text": "44 . 41 in rouge - 1", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["44", ".", "41", "in", "rouge", "-", "1"], "offsets": [147, 148, 149, 150, 151, 152, 153]}], "trigger": {"text": "driven", "tokens": ["driven"], "offsets": [127]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of the matching framework", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["effectiveness", "of", "the", "matching", "framework"], "offsets": [165, 166, 167, 168, 169]}, {"text": "on the other five datasets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "other", "five", "datasets"], "offsets": [157, 158, 159, 160, 161]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [163]}}], "document": ["this", "paper", "creates", "a", "paradigm", "shift", "with", "regard", "to", "the", "way", "we", "build", "neural", "extractive", "summarization", "systems", ".", "instead", "of", "following", "the", "commonly", "used", "framework", "of", "extracting", "sentences", "individually", "and", "modeling", "the", "relationship", "between", "sentences", ",", "we", "formulate", "the", "extractive", "summarization", "task", "as", "a", "semantic", "text", "matching", "problem", ",", "in", "which", "a", "source", "document", "and", "candidate", "summaries", "will", "be", "(", "extracted", "from", "the", "original", "text", ")", "matched", "in", "a", "semantic", "space", ".", "notably", ",", "this", "paradigm", "shift", "to", "semantic", "matching", "framework", "is", "well", "-", "grounded", "in", "our", "comprehensive", "analysis", "of", "the", "inherent", "gap", "between", "sentence", "-", "level", "and", "summary", "-", "level", "extractors", "based", "on", "the", "property", "of", "the", "dataset", ".", "besides", ",", "even", "instantiating", "the", "framework", "with", 
"a", "simple", "form", "of", "a", "matching", "model", ",", "we", "have", "driven", "the", "state", "-", "of", "-", "the", "-", "art", "extractive", "result", "on", "cnn", "/", "dailymail", "to", "a", "new", "level", "(", "44", ".", "41", "in", "rouge", "-", "1", ")", ".", "experiments", "on", "the", "other", "five", "datasets", "also", "show", "the", "effectiveness", "of", "the", "matching", "framework", ".", "we", "believe", "the", "power", "of", "this", "matching", "-", "based", "summarization", "framework", "has", "not", "been", "fully", "exploited", ".", "to", "encourage", "more", "instantiations", "in", "the", "future", ",", "we", "have", "released", "our", "codes", ",", "processed", "dataset", ",", "as", "well", "as", "generated", "summaries", "in", "https", ":", "/", "/", "github", ".", "com", "/", "maszhongming", "/", "matchsum", "."]}, {"venue": "ACL", "title": "Complex Evolutional Pattern Learning for Temporal Knowledge Graph Reasoning", "abstract": "A Temporal Knowledge Graph (TKG) is a sequence of KGs corresponding to different timestamps. TKG reasoning aims to predict potential facts in the future given the historical KG sequences. One key of this task is to mine and understand evolutional patterns of facts from these sequences. The evolutional patterns are complex in two aspects, length-diversity and time-variability. Existing models for TKG reasoning focus on modeling fact sequences of a fixed length, which cannot discover complex evolutional patterns that vary in length. Furthermore, these models are all trained offline, which cannot well adapt to the changes of evolutional patterns from then on. Thus, we propose a new model, called Complex Evolutional Network (CEN), which uses a length-aware Convolutional Neural Network (CNN) to handle evolutional patterns of different lengths via an easy-to-difficult curriculum learning strategy. 
Besides, we propose to learn the model under the online setting so that it can adapt to the changes of evolutional patterns over time. Extensive experiments demonstrate that CEN obtains substantial performance improvement under both the traditional offline and the proposed online settings.", "doc_id": "afb7200633a7ff3ac1d70d4991c533e4", "publication_year": 2022, "sentences": ["a temporal knowledge graph ( tkg ) is a sequence of kgs corresponding to different timestamps .", "tkg reasoning aims to predict potential facts in the future given the historical kg sequences .", "one key of this task is to mine and understand evolutional patterns of facts from these sequences .", "the evolutional patterns are complex in two aspects , length - diversity and time - variability .", "existing models for tkg reasoning focus on modeling fact sequences of a fixed length , which cannot discover complex evolutional patterns that vary in length .", "furthermore , these models are all trained offline , which cannot well adapt to the changes of evolutional patterns from then on .", "thus , we propose a new model , called complex evolutional network ( cen ) , which uses a length - aware convolutional neural network ( cnn ) to handle evolutional patterns of different lengths via an easy - to - difficult curriculum learning strategy .", "besides , we propose to learn the model under the online setting so that it can adapt to the changes of evolutional patterns over time .", "extensive experiments demonstrate that cen obtains substantial performance improvement under both the traditional offline and the proposed online settings ."], "events": [{"event_type": "ITT", "arguments": [{"text": "tkg reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["temporal", "knowledge", "graph", "reasoning"], "offsets": [1, 2, 3, 18]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [21]}}, {"event_type": "RWF", "arguments": [{"text": "existing models for tkg reasoning", 
"nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "models", "for", "temporal", "knowledge", "graph", "reasoning"], "offsets": [68, 69, 70, 1, 2, 3, 72]}, {"text": "cannot discover", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "discover"], "offsets": [84, 85]}], "trigger": {"text": "cannot discover", "tokens": ["cannot", "discover"], "offsets": [84, 85]}}, {"event_type": "RWF", "arguments": [{"text": "existing models for tkg reasoning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "models", "for", "temporal", "knowledge", "graph", "reasoning"], "offsets": [68, 69, 70, 1, 2, 3, 72]}, {"text": "cannot well adapt", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "well", "adapt"], "offsets": [104, 105, 106]}], "trigger": {"text": "cannot well adapt", "tokens": ["cannot", "well", "adapt"], "offsets": [104, 105, 106]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [119]}, {"text": "complex evolutional network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["complex", "evolutional", "network"], "offsets": [126, 127, 128]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [120]}}, {"event_type": "MDS", "arguments": [{"text": "length - aware convolutional neural network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["length", "-", "aware", "convolutional", "neural", "network"], "offsets": [136, 137, 138, 139, 140, 141]}, {"text": "handle", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["handle"], "offsets": [146]}, {"text": "easy - to - difficult curriculum learning strategy", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["easy", "-", "to", "-", "difficult", "curriculum", "learning", "strategy"], "offsets": [154, 155, 156, 157, 158, 159, 160, 161]}], "trigger": {"text": "uses", "tokens": ["uses"], 
"offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "evolutional patterns of different lengths", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["evolutional", "patterns", "of", "different", "lengths"], "offsets": [147, 148, 149, 150, 151]}], "trigger": {"text": "handle", "tokens": ["handle"], "offsets": [146]}}, {"event_type": "PUR", "arguments": [{"text": "changes of evolutional patterns", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["changes", "of", "evolutional", "patterns"], "offsets": [182, 183, 184, 185]}, {"text": "over time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "time"], "offsets": [186, 187]}], "trigger": {"text": "adapt", "tokens": ["adapt"], "offsets": [179]}}, {"event_type": "FIN", "arguments": [{"text": "obtains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["obtains"], "offsets": [194]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [191]}}, {"event_type": "FAC", "arguments": [{"text": "complex evolutional network", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["complex", "evolutional", "network"], "offsets": [126, 127, 128]}, {"text": "substantial performance improvement", "nugget_type": "STR", "argument_type": "Object", "tokens": ["substantial", "performance", "improvement"], "offsets": [195, 196, 197]}, {"text": "under both the traditional offline and the proposed online settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "both", "the", "traditional", "offline", "and", "the", "proposed", "online", "settings"], "offsets": [198, 199, 200, 201, 202, 203, 204, 205, 206, 207]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [194]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["model"], "offsets": [170]}, 
{"text": "under the online setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "the", "online", "setting"], "offsets": [171, 172, 173, 174]}, {"text": "adapt", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adapt"], "offsets": [179]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [168]}}], "document": ["a", "temporal", "knowledge", "graph", "(", "tkg", ")", "is", "a", "sequence", "of", "kgs", "corresponding", "to", "different", "timestamps", ".", "tkg", "reasoning", "aims", "to", "predict", "potential", "facts", "in", "the", "future", "given", "the", "historical", "kg", "sequences", ".", "one", "key", "of", "this", "task", "is", "to", "mine", "and", "understand", "evolutional", "patterns", "of", "facts", "from", "these", "sequences", ".", "the", "evolutional", "patterns", "are", "complex", "in", "two", "aspects", ",", "length", "-", "diversity", "and", "time", "-", "variability", ".", "existing", "models", "for", "tkg", "reasoning", "focus", "on", "modeling", "fact", "sequences", "of", "a", "fixed", "length", ",", "which", "cannot", "discover", "complex", "evolutional", "patterns", "that", "vary", "in", "length", ".", "furthermore", ",", "these", "models", "are", "all", "trained", "offline", ",", "which", "cannot", "well", "adapt", "to", "the", "changes", "of", "evolutional", "patterns", "from", "then", "on", ".", "thus", ",", "we", "propose", "a", "new", "model", ",", "called", "complex", "evolutional", "network", "(", "cen", ")", ",", "which", "uses", "a", "length", "-", "aware", "convolutional", "neural", "network", "(", "cnn", ")", "to", "handle", "evolutional", "patterns", "of", "different", "lengths", "via", "an", "easy", "-", "to", "-", "difficult", "curriculum", "learning", "strategy", ".", "besides", ",", "we", "propose", "to", "learn", "the", "model", "under", "the", "online", "setting", "so", "that", "it", "can", "adapt", "to", "the", "changes", "of", "evolutional", "patterns", "over", "time", 
".", "extensive", "experiments", "demonstrate", "that", "cen", "obtains", "substantial", "performance", "improvement", "under", "both", "the", "traditional", "offline", "and", "the", "proposed", "online", "settings", "."]}, {"venue": "ACL", "title": "Sentiment Tagging with Partial Labels using Modular Architectures", "abstract": "Many NLP learning tasks can be decomposed into several distinct sub-tasks, each associated with a partial label. In this paper we focus on a popular class of learning problems, sequence prediction applied to several sentiment analysis tasks, and suggest a modular learning approach in which different sub-tasks are learned using separate functional modules, combined to perform the final task while sharing information. Our experiments show this approach helps constrain the learning process and can alleviate some of the supervision efforts.", "doc_id": "739f409c0bde41cc80a60b16875b4310", "publication_year": 2019, "sentences": ["many nlp learning tasks can be decomposed into several distinct sub - tasks , each associated with a partial label .", "in this paper we focus on a popular class of learning problems , sequence prediction applied to several sentiment analysis tasks , and suggest a modular learning approach in which different sub - tasks are learned using separate functional modules , combined to perform the final task while sharing information .", "our experiments show this approach helps constrain the learning process and can alleviate some of the supervision efforts ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "learning"], "offsets": [1, 2]}], "trigger": {"text": "decomposed", "tokens": ["decomposed"], "offsets": [6]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [24]}, {"text": "popular class of learning problems", "nugget_type": "TAK", 
"argument_type": "Content", "tokens": ["popular", "class", "of", "learning", "problems"], "offsets": [28, 29, 30, 31, 32]}], "trigger": {"text": "focus on", "tokens": ["focus", "on"], "offsets": [25, 26]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [24]}, {"text": "modular learning approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["modular", "learning", "approach"], "offsets": [46, 47, 48]}, {"text": "combined to perform", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["combined", "to", "perform"], "offsets": [62, 63, 64]}], "trigger": {"text": "suggest", "tokens": ["suggest"], "offsets": [44]}}, {"event_type": "MDS", "arguments": [{"text": "different sub - tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["different", "sub", "-", "tasks"], "offsets": [51, 52, 53, 54]}, {"text": "separate functional modules", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["separate", "functional", "modules"], "offsets": [58, 59, 60]}], "trigger": {"text": "using", "tokens": ["using"], "offsets": [57]}}, {"event_type": "PUR", "arguments": [{"text": "final task", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["final", "task"], "offsets": [66, 67]}, {"text": "while sharing information", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["while", "sharing", "information"], "offsets": [68, 69, 70]}], "trigger": {"text": "combined to perform", "tokens": ["combined", "to", "perform"], "offsets": [62, 63, 64]}}, {"event_type": "FAC", "arguments": [{"text": "modular learning approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["modular", "learning", "approach"], "offsets": [46, 47, 48]}, {"text": "learning process", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["learning", "process"], "offsets": [80, 81]}], "trigger": {"text": "helps constrain", "tokens": ["helps", 
"constrain"], "offsets": [77, 78]}}, {"event_type": "FAC", "arguments": [{"text": "modular learning approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["modular", "learning", "approach"], "offsets": [46, 47, 48]}, {"text": "some of the supervision efforts", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["some", "of", "the", "supervision", "efforts"], "offsets": [85, 86, 87, 88, 89]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [84]}}], "document": ["many", "nlp", "learning", "tasks", "can", "be", "decomposed", "into", "several", "distinct", "sub", "-", "tasks", ",", "each", "associated", "with", "a", "partial", "label", ".", "in", "this", "paper", "we", "focus", "on", "a", "popular", "class", "of", "learning", "problems", ",", "sequence", "prediction", "applied", "to", "several", "sentiment", "analysis", "tasks", ",", "and", "suggest", "a", "modular", "learning", "approach", "in", "which", "different", "sub", "-", "tasks", "are", "learned", "using", "separate", "functional", "modules", ",", "combined", "to", "perform", "the", "final", "task", "while", "sharing", "information", ".", "our", "experiments", "show", "this", "approach", "helps", "constrain", "the", "learning", "process", "and", "can", "alleviate", "some", "of", "the", "supervision", "efforts", "."]}, {"venue": "ACL", "title": "Continuous Language Generative Flow", "abstract": "Recent years have witnessed various types of generative models for natural language generation (NLG), especially RNNs or transformer based sequence-to-sequence models, as well as variational autoencoder (VAE) and generative adversarial network (GAN) based models. However, flow-based generative models, which achieve strong performance in image generation due to their invertibility and exact density estimation properties, have been less explored for NLG. 
In this paper, we propose a flow-based language generation model by adapting previous flow generative models to language generation via continuous input embeddings, adapted affine coupling structures, and a novel architecture for autoregressive text generation. We also apply our framework to Sequence-to-Sequence generation, including text- and video-based Question Generation (QG) and Neural Machine Translation (NMT), and data augmentation for Question Answering (QA). We use our language flow model to provide extra input features for QG and NMT, which achieves improvements over the strong QG baselines on SQuAD and TVQA and NMT baseline on WMT16. We also augment QA data with new context by injecting noise to the latent features of the language flow and show this augmentation leads to a large performance improvement from strong baselines on SQuAD and TVQA.", "doc_id": "75ed6daa14fb4048a5213ab915e1ff5c", "publication_year": 2021, "sentences": ["recent years have witnessed various types of generative models for natural language generation ( nlg ) , especially rnns or transformer based sequence - to - sequence models , as well as variational autoencoder ( vae ) and generative adversarial network ( gan ) based models .", "however , flow - based generative models , which achieve strong performance in image generation due to their invertibility and exact density estimation properties , have been less explored for nlg .", "in this paper , we propose a flow - based language generation model by adapting previous flow generative models to language generation via continuous input embeddings , adapted affine coupling structures , and a novel architecture for autoregressive text generation .", "we also apply our framework to sequence - to - sequence generation , including text - and video - based question generation ( qg ) and neural machine translation ( nmt ) , and data augmentation for question answering ( qa ) .", "we use our language flow model to provide extra input features 
for qg and nmt , which achieves improvements over the strong qg baselines on squad and tvqa and nmt baseline on wmt16 .", "we also augment qa data with new context by injecting noise to the latent features of the language flow and show this augmentation leads to a large performance improvement from strong baselines on squad and tvqa ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "generation"], "offsets": [10, 11, 12]}], "trigger": {"text": "witnessed", "tokens": ["witnessed"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [83]}, {"text": "flow - based language generation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["flow", "-", "based", "language", "generation", "model"], "offsets": [86, 87, 88, 89, 90, 91]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "previous flow generative models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["previous", "flow", "generative", "models"], "offsets": [94, 95, 96, 97]}, {"text": "language generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["language", "generation"], "offsets": [99, 100]}], "trigger": {"text": "adapting", "tokens": ["adapting"], "offsets": [93]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [120]}, {"text": "framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["framework"], "offsets": [124]}, {"text": "sequence - to - sequence generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sequence", "-", "to", "-", "sequence", "generation"], "offsets": [126, 127, 128, 129, 130, 131]}], "trigger": {"text": "apply", "tokens": 
["apply"], "offsets": [122]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [163]}, {"text": "language flow model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["language", "flow", "model"], "offsets": [166, 167, 168]}, {"text": "provide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["provide"], "offsets": [170]}, {"text": "question generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["question", "generation"], "offsets": [140, 141]}, {"text": "neural machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "machine", "translation"], "offsets": [146, 147, 148]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [164]}}, {"event_type": "PUR", "arguments": [{"text": "extra input features", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["extra", "input", "features"], "offsets": [171, 172, 173]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [170]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [197]}, {"text": "qa data", "nugget_type": "DST", "argument_type": "Content", "tokens": ["qa", "data"], "offsets": [200, 201]}, {"text": "with new context", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "new", "context"], "offsets": [202, 203, 204]}], "trigger": {"text": "augment", "tokens": ["augment"], "offsets": [199]}}, {"event_type": "MDS", "arguments": [{"text": "latent features", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["latent", "features"], "offsets": [210, 211]}, {"text": "noise", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["noise"], "offsets": [207]}], "trigger": {"text": "injecting", "tokens": ["injecting"], "offsets": [206]}}, {"event_type": "FIN", "arguments": [{"text": "leads", "nugget_type": 
"E-CMP", "argument_type": "Content", "tokens": ["leads"], "offsets": [220]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [217]}}, {"event_type": "CMP", "arguments": [{"text": "augmentation", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["augmentation"], "offsets": [219]}, {"text": "large", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["large"], "offsets": [223]}, {"text": "performance improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["performance", "improvement"], "offsets": [224, 225]}, {"text": "strong baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["strong", "baselines"], "offsets": [227, 228]}, {"text": "on squad and tvqa", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "squad", "and", "tvqa"], "offsets": [229, 230, 231, 232]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [220]}}], "document": ["recent", "years", "have", "witnessed", "various", "types", "of", "generative", "models", "for", "natural", "language", "generation", "(", "nlg", ")", ",", "especially", "rnns", "or", "transformer", "based", "sequence", "-", "to", "-", "sequence", "models", ",", "as", "well", "as", "variational", "autoencoder", "(", "vae", ")", "and", "generative", "adversarial", "network", "(", "gan", ")", "based", "models", ".", "however", ",", "flow", "-", "based", "generative", "models", ",", "which", "achieve", "strong", "performance", "in", "image", "generation", "due", "to", "their", "invertibility", "and", "exact", "density", "estimation", "properties", ",", "have", "been", "less", "explored", "for", "nlg", ".", "in", "this", "paper", ",", "we", "propose", "a", "flow", "-", "based", "language", "generation", "model", "by", "adapting", "previous", "flow", "generative", "models", "to", "language", "generation", "via", "continuous", "input", "embeddings", ",", "adapted", "affine", "coupling", "structures", ",", "and", "a", "novel", "architecture", "for", 
"autoregressive", "text", "generation", ".", "we", "also", "apply", "our", "framework", "to", "sequence", "-", "to", "-", "sequence", "generation", ",", "including", "text", "-", "and", "video", "-", "based", "question", "generation", "(", "qg", ")", "and", "neural", "machine", "translation", "(", "nmt", ")", ",", "and", "data", "augmentation", "for", "question", "answering", "(", "qa", ")", ".", "we", "use", "our", "language", "flow", "model", "to", "provide", "extra", "input", "features", "for", "qg", "and", "nmt", ",", "which", "achieves", "improvements", "over", "the", "strong", "qg", "baselines", "on", "squad", "and", "tvqa", "and", "nmt", "baseline", "on", "wmt16", ".", "we", "also", "augment", "qa", "data", "with", "new", "context", "by", "injecting", "noise", "to", "the", "latent", "features", "of", "the", "language", "flow", "and", "show", "this", "augmentation", "leads", "to", "a", "large", "performance", "improvement", "from", "strong", "baselines", "on", "squad", "and", "tvqa", "."]}, {"venue": "ACL", "title": "Predicting Performance for Natural Language Processing Tasks", "abstract": "Given the complexity of combinations of tasks, languages, and domains in natural language processing (NLP) research, it is computationally prohibitive to exhaustively test newly proposed models on each possible experimental setting. In this work, we attempt to explore the possibility of gaining plausible judgments of how well an NLP model can perform under an experimental setting, without actually training or testing the model. To do so, we build regression models to predict the evaluation score of an NLP experiment given the experimental settings as input. Experimenting on~9 different NLP tasks, we find that our predictors can produce meaningful predictions over unseen languages and different modeling architectures, outperforming reasonable baselines as well as human experts. %we represent experimental settings using an array of features. 
Going further, we outline how our predictor can be used to find a small subset of representative experiments that should be run in order to obtain plausible predictions for all other experimental settings.", "doc_id": "4a3d4d514c8e5d9a8664119cf47b1c7b", "publication_year": 2020, "sentences": ["given the complexity of combinations of tasks , languages , and domains in natural language processing ( nlp ) research , it is computationally prohibitive to exhaustively test newly proposed models on each possible experimental setting .", "in this work , we attempt to explore the possibility of gaining plausible judgments of how well an nlp model can perform under an experimental setting , without actually training or testing the model .", "to do so , we build regression models to predict the evaluation score of an nlp experiment given the experimental settings as input .", "experimenting on 9 different nlp tasks , we find that our predictors can produce meaningful predictions over unseen languages and different modeling architectures , outperforming reasonable baselines as well as human experts .", "we represent experimental settings using an array of features .", "going further , we outline how our predictor can be used to find a small subset of representative experiments that should be run in order to obtain plausible predictions for all other experimental settings ."], "events": [{"event_type": "RWF", "arguments": [{"text": "computationally prohibitive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["computationally", "prohibitive"], "offsets": [23, 24]}, {"text": "newly proposed models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["newly", "proposed", "models"], "offsets": [28, 29, 30]}, {"text": "on each possible experimental setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "each", "possible", "experimental", "setting"], "offsets": [31, 32, 33, 34, 35]}], "trigger": {"text": "exhaustively test", "tokens": 
["exhaustively", "test"], "offsets": [26, 27]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [41]}, {"text": "possibility of gaining plausible judgments of how well an nlp model can perform under an experimental setting", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["possibility", "of", "gaining", "plausible", "judgments", "of", "how", "well", "an", "nlp", "model", "can", "perform", "under", "an", "experimental", "setting"], "offsets": [46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62]}, {"text": "without actually training or testing the model", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "actually", "training", "or", "testing", "the", "model"], "offsets": [64, 65, 66, 67, 68, 69, 70]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [44]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "regression models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["regression", "models"], "offsets": [78, 79]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [81]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [77]}}, {"event_type": "PUR", "arguments": [{"text": "evaluation score of an nlp experiment", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["evaluation", "score", "of", "an", "nlp", "experiment"], "offsets": [83, 84, 85, 86, 87, 88]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [81]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [103]}, {"text": "produce", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["produce"], "offsets": [109]}, {"text": "outperforming", "nugget_type": "E-CMP", 
"argument_type": "Content", "tokens": ["outperforming"], "offsets": [120]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [104]}}, {"event_type": "FAC", "arguments": [{"text": "over unseen languages and different modeling architectures", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "unseen", "languages", "and", "different", "modeling", "architectures"], "offsets": [112, 113, 114, 115, 116, 117, 118]}, {"text": "meaningful predictions", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["meaningful", "predictions"], "offsets": [110, 111]}, {"text": "predictors", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["predictors"], "offsets": [107]}], "trigger": {"text": "produce", "tokens": ["produce"], "offsets": [109]}}, {"event_type": "CMP", "arguments": [{"text": "outperforming", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforming"], "offsets": [120]}], "trigger": {"text": "outperforming", "tokens": ["outperforming"], "offsets": [120]}}], "document": ["given", "the", "complexity", "of", "combinations", "of", "tasks", ",", "languages", ",", "and", "domains", "in", "natural", "language", "processing", "(", "nlp", ")", "research", ",", "it", "is", "computationally", "prohibitive", "to", "exhaustively", "test", "newly", "proposed", "models", "on", "each", "possible", "experimental", "setting", ".", "in", "this", "work", ",", "we", "attempt", "to", "explore", "the", "possibility", "of", "gaining", "plausible", "judgments", "of", "how", "well", "an", "nlp", "model", "can", "perform", "under", "an", "experimental", "setting", ",", "without", "actually", "training", "or", "testing", "the", "model", ".", "to", "do", "so", ",", "we", "build", "regression", "models", "to", "predict", "the", "evaluation", "score", "of", "an", "nlp", "experiment", "given", "the", "experimental", "settings", "as", "input", ".", "experimenting", "on", "9", "different", "nlp", "tasks", ",", "we", "find", "that", 
"our", "predictors", "can", "produce", "meaningful", "predictions", "over", "unseen", "languages", "and", "different", "modeling", "architectures", ",", "outperforming", "reasonable", "baselines", "as", "well", "as", "human", "experts", ".", "we", "represent", "experimental", "settings", "using", "an", "array", "of", "features", ".", "going", "further", ",", "we", "outline", "how", "our", "predictor", "can", "be", "used", "to", "find", "a", "small", "subset", "of", "representative", "experiments", "that", "should", "be", "run", "in", "order", "to", "obtain", "plausible", "predictions", "for", "all", "other", "experimental", "settings", "."]}, {"venue": "ACL", "title": "Graph-to-Tree Learning for Solving Math Word Problems", "abstract": "While the recent tree-based neural models have demonstrated promising results in generating solution expression for the math word problem (MWP), most of these models do not capture the relationships and order information among the quantities well. This results in poor quantity representations and incorrect solution expressions. In this paper, we propose Graph2Tree, a novel deep learning architecture that combines the merits of the graph-based encoder and tree-based decoder to generate better solution expressions. Included in our Graph2Tree framework are two graphs, namely the Quantity Cell Graph and Quantity Comparison Graph, which are designed to address limitations of existing methods by effectively representing the relationships and order information among the quantities in MWPs. We conduct extensive experiments on two available datasets. Our experiment results show that Graph2Tree outperforms the state-of-the-art baselines on two benchmark datasets significantly. 
We also discuss case studies and empirically examine Graph2Tree\u2019s effectiveness in translating the MWP text into solution expressions.", "doc_id": "19eaa9435a164fe355fc4a7e577323f0", "publication_year": 2020, "sentences": ["while the recent tree - based neural models have demonstrated promising results in generating solution expression for the math word problem ( mwp ) , most of these models do not capture the relationships and order information among the quantities well .", "this results in poor quantity representations and incorrect solution expressions .", "in this paper , we propose graph2tree , a novel deep learning architecture that combines the merits of the graph - based encoder and tree - based decoder to generate better solution expressions .", "included in our graph2tree framework are two graphs , namely the quantity cell graph and quantity comparison graph , which are designed to address limitations of existing methods by effectively representing the relationships and order information among the quantities in mwps .", "we conduct extensive experiments on two available datasets .", "our experiment results show that graph2tree outperforms the state - of - the - art baselines on two benchmark datasets significantly .", "we also discuss case studies and empirically examine graph2tree \u2019 s effectiveness in translating the mwp text into solution expressions ."], "events": [{"event_type": "ITT", "arguments": [{"text": "math word problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["math", "word", "problem"], "offsets": [18, 19, 20]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [13]}}, {"event_type": "RWF", "arguments": [{"text": "not capture", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "capture"], "offsets": [30, 31]}, {"text": "recent tree - based neural models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["recent", "tree", "-", "based", "neural", "models"], 
"offsets": [2, 3, 4, 5, 6, 7]}, {"text": "relationships", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["relationships"], "offsets": [33]}], "trigger": {"text": "not capture", "tokens": ["not", "capture"], "offsets": [30, 31]}}, {"event_type": "RWF", "arguments": [{"text": "poor quantity representations", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["poor", "quantity", "representations"], "offsets": [45, 46, 47]}, {"text": "incorrect solution expressions", "nugget_type": "FEA", "argument_type": "Fault", "tokens": ["incorrect", "solution", "expressions"], "offsets": [49, 50, 51]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [43]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "generate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generate"], "offsets": [82]}, {"text": "graph2tree", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graph2tree"], "offsets": [59]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "PUR", "arguments": [{"text": "better solution expressions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["better", "solution", "expressions"], "offsets": [83, 84, 85]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [82]}}, {"event_type": "MDS", "arguments": [{"text": "quantity cell graph", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["quantity", "cell", "graph"], "offsets": [98, 99, 100]}, {"text": "quantity comparison graph", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["quantity", "comparison", "graph"], "offsets": [102, 103, 104]}, {"text": "relationships", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relationships"], "offsets": [119]}, {"text": "order information", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": 
["order", "information"], "offsets": [121, 122]}, {"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [110]}], "trigger": {"text": "representing", "tokens": ["representing"], "offsets": [117]}}, {"event_type": "PUR", "arguments": [{"text": "limitations of existing methods", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["limitations", "of", "existing", "methods"], "offsets": [111, 112, 113, 114]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [110]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [129]}, {"text": "two available datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "available", "datasets"], "offsets": [134, 135, 136]}, {"text": "extensive experiments", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["extensive", "experiments"], "offsets": [131, 132]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [130]}}, {"event_type": "CMP", "arguments": [{"text": "graph2tree", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["graph2tree"], "offsets": [143]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [144]}, {"text": "two benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "benchmark", "datasets"], "offsets": [155, 156, 157]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [144]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [160]}, {"text": "case studies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["case", "studies"], "offsets": [163, 164]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [162]}}, {"event_type": "WKS", "arguments": [{"text": "in translating the mwp text into 
solution expressions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "translating", "the", "mwp", "text", "into", "solution", "expressions"], "offsets": [172, 173, 174, 175, 176, 177, 178, 179]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [160]}, {"text": "graph2tree \u2019 s effectiveness", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["graph2tree", "\u2019", "s", "effectiveness"], "offsets": [168, 169, 170, 171]}], "trigger": {"text": "empirically examine", "tokens": ["empirically", "examine"], "offsets": [166, 167]}}], "document": ["while", "the", "recent", "tree", "-", "based", "neural", "models", "have", "demonstrated", "promising", "results", "in", "generating", "solution", "expression", "for", "the", "math", "word", "problem", "(", "mwp", ")", ",", "most", "of", "these", "models", "do", "not", "capture", "the", "relationships", "and", "order", "information", "among", "the", "quantities", "well", ".", "this", "results", "in", "poor", "quantity", "representations", "and", "incorrect", "solution", "expressions", ".", "in", "this", "paper", ",", "we", "propose", "graph2tree", ",", "a", "novel", "deep", "learning", "architecture", "that", "combines", "the", "merits", "of", "the", "graph", "-", "based", "encoder", "and", "tree", "-", "based", "decoder", "to", "generate", "better", "solution", "expressions", ".", "included", "in", "our", "graph2tree", "framework", "are", "two", "graphs", ",", "namely", "the", "quantity", "cell", "graph", "and", "quantity", "comparison", "graph", ",", "which", "are", "designed", "to", "address", "limitations", "of", "existing", "methods", "by", "effectively", "representing", "the", "relationships", "and", "order", "information", "among", "the", "quantities", "in", "mwps", ".", "we", "conduct", "extensive", "experiments", "on", "two", "available", "datasets", ".", "our", "experiment", "results", "show", "that", "graph2tree", "outperforms", 
"the", "state", "-", "of", "-", "the", "-", "art", "baselines", "on", "two", "benchmark", "datasets", "significantly", ".", "we", "also", "discuss", "case", "studies", "and", "empirically", "examine", "graph2tree", "\u2019", "s", "effectiveness", "in", "translating", "the", "mwp", "text", "into", "solution", "expressions", "."]}, {"venue": "ACL", "title": "Conversational Graph Grounded Policy Learning for Open-Domain Conversation Generation", "abstract": "To address the challenge of policy learning in open-domain multi-turn conversation, we propose to represent prior information about dialog transitions as a graph and learn a graph grounded dialog policy, aimed at fostering a more coherent and controllable dialog. To this end, we first construct a conversational graph (CG) from dialog corpora, in which there are vertices to represent \u201cwhat to say\u201d and \u201chow to say\u201d, and edges to represent natural transition between a message (the last utterance in a dialog context) and its response. We then present a novel CG grounded policy learning framework that conducts dialog flow planning by graph traversal, which learns to identify a what-vertex and a how-vertex from the CG at each turn to guide response generation. In this way, we effectively leverage the CG to facilitate policy learning as follows: (1) it enables more effective long-term reward design, (2) it provides high-quality candidate actions, and (3) it gives us more control over the policy. 
Results on two benchmark corpora demonstrate the effectiveness of this framework.", "doc_id": "be4af5aadf1b91dbaecccfb3c2449fe9", "publication_year": 2020, "sentences": ["to address the challenge of policy learning in open - domain multi - turn conversation , we propose to represent prior information about dialog transitions as a graph and learn a graph grounded dialog policy , aimed at fostering a more coherent and controllable dialog .", "to this end , we first construct a conversational graph ( cg ) from dialog corpora , in which there are vertices to represent \u201c what to say \u201d and \u201c how to say \u201d , and edges to represent natural transition between a message ( the last utterance in a dialog context ) and its response .", "we then present a novel cg grounded policy learning framework that conducts dialog flow planning by graph traversal , which learns to identify a what - vertex and a how - vertex from the cg at each turn to guide response generation .", "in this way , we effectively leverage the cg to facilitate policy learning as follows : ( 1 ) it enables more effective long - term reward design , ( 2 ) it provides high - quality candidate actions , and ( 3 ) it gives us more control over the policy .", "results on two benchmark corpora demonstrate the effectiveness of this framework ."], "events": [{"event_type": "PUR", "arguments": [{"text": "challenge of policy learning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["challenge", "of", "policy", "learning"], "offsets": [3, 4, 5, 6]}, {"text": "in open - domain multi - turn conversation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "open", "-", "domain", "multi", "-", "turn", "conversation"], "offsets": [7, 8, 9, 10, 11, 12, 13, 14]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [16]}, {"text": 
"graph grounded dialog policy", "nugget_type": "APP", "argument_type": "Content", "tokens": ["graph", "grounded", "dialog", "policy"], "offsets": [31, 32, 33, 34]}, {"text": "fostering", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["fostering"], "offsets": [38]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [29]}}, {"event_type": "PUR", "arguments": [{"text": "more coherent dialog", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["more", "coherent", "dialog"], "offsets": [40, 41, 44]}, {"text": "more controllable dialog", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["more", "controllable", "dialog"], "offsets": [40, 43, 44]}], "trigger": {"text": "fostering", "tokens": ["fostering"], "offsets": [38]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [50]}, {"text": "conversational graph", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["conversational", "graph"], "offsets": [54, 55]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [104]}, {"text": "cg grounded policy learning framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["conversational", "graph", "grounded", "policy", "learning", "framework"], "offsets": [54, 55, 110, 111, 112, 113]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "cg grounded policy learning framework", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["conversational", "graph", "grounded", "policy", "learning", "framework"], "offsets": [54, 55, 110, 111, 112, 113]}, {"text": "two benchmark corpora", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "benchmark", "corpora"], "offsets": [202, 203, 204]}, {"text": 
"effectiveness of this framework", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "this", "framework"], "offsets": [207, 208, 209, 210]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [205]}}, {"event_type": "MDS", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [1]}, {"text": "prior information about dialog transitions", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["prior", "information", "about", "dialog", "transitions"], "offsets": [20, 21, 22, 23, 24]}, {"text": "graph", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph"], "offsets": [27]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [19]}}, {"event_type": "MDS", "arguments": [{"text": "graph traversal", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["graph", "traversal"], "offsets": [120, 121]}, {"text": "dialog flow planning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["dialog", "flow", "planning"], "offsets": [116, 117, 118]}], "trigger": {"text": "conducts", "tokens": ["conducts"], "offsets": [115]}}, {"event_type": "MDS", "arguments": [{"text": "what - vertex", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["what", "-", "vertex"], "offsets": [128, 129, 130]}, {"text": "how - vertex", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["how", "-", "vertex"], "offsets": [133, 134, 135]}, {"text": "cg at each turn", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["conversational", "graph", "at", "each", "turn"], "offsets": [54, 55, 139, 140, 141]}, {"text": "guide", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["guide"], "offsets": [143]}], "trigger": {"text": "identify", "tokens": ["identify"], "offsets": [126]}}, {"event_type": "PUR", "arguments": [{"text": "response generation", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["response", "generation"], "offsets": [144, 145]}], "trigger": {"text": "guide", "tokens": ["guide"], "offsets": [143]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [151]}, {"text": "cg", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["conversational", "graph"], "offsets": [54, 55]}, {"text": "facilitate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["facilitate"], "offsets": [157]}], "trigger": {"text": "effectively leverage", "tokens": ["effectively", "leverage"], "offsets": [152, 153]}}, {"event_type": "PUR", "arguments": [{"text": "policy learning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["policy", "learning"], "offsets": [158, 159]}], "trigger": {"text": "facilitate", "tokens": ["facilitate"], "offsets": [157]}}], "document": ["to", "address", "the", "challenge", "of", "policy", "learning", "in", "open", "-", "domain", "multi", "-", "turn", "conversation", ",", "we", "propose", "to", "represent", "prior", "information", "about", "dialog", "transitions", "as", "a", "graph", "and", "learn", "a", "graph", "grounded", "dialog", "policy", ",", "aimed", "at", "fostering", "a", "more", "coherent", "and", "controllable", "dialog", ".", "to", "this", "end", ",", "we", "first", "construct", "a", "conversational", "graph", "(", "cg", ")", "from", "dialog", "corpora", ",", "in", "which", "there", "are", "vertices", "to", "represent", "\u201c", "what", "to", "say", "\u201d", "and", "\u201c", "how", "to", "say", "\u201d", ",", "and", "edges", "to", "represent", "natural", "transition", "between", "a", "message", "(", "the", "last", "utterance", "in", "a", "dialog", "context", ")", "and", "its", "response", ".", "we", "then", "present", "a", "novel", "cg", "grounded", "policy", "learning", "framework", "that", "conducts", "dialog", "flow", "planning", "by", "graph", "traversal", ",", 
"which", "learns", "to", "identify", "a", "what", "-", "vertex", "and", "a", "how", "-", "vertex", "from", "the", "cg", "at", "each", "turn", "to", "guide", "response", "generation", ".", "in", "this", "way", ",", "we", "effectively", "leverage", "the", "cg", "to", "facilitate", "policy", "learning", "as", "follows", ":", "(", "1", ")", "it", "enables", "more", "effective", "long", "-", "term", "reward", "design", ",", "(", "2", ")", "it", "provides", "high", "-", "quality", "candidate", "actions", ",", "and", "(", "3", ")", "it", "gives", "us", "more", "control", "over", "the", "policy", ".", "results", "on", "two", "benchmark", "corpora", "demonstrate", "the", "effectiveness", "of", "this", "framework", "."]}, {"venue": "ACL", "title": "Bayes Test of Precision, Recall, and F1 Measure for Comparison of Two Natural Language Processing Models", "abstract": "Direct comparison on point estimation of the precision (P), recall (R), and F1 measure of two natural language processing (NLP) models on a common test corpus is unreasonable and results in less replicable conclusions due to a lack of a statistical test. However, the existing t-tests in cross-validation (CV) for model comparison are inappropriate because the distributions of P, R, F1 are skewed and an interval estimation of P, R, and F1 based on a t-test may exceed [0,1]. In this study, we propose to use a block-regularized 3\u00d72 CV (3\u00d72 BCV) in model comparison because it could regularize the difference in certain frequency distributions over linguistic units between training and validation sets and yield stable estimators of P, R, and F1. On the basis of the 3\u00d72 BCV, we calibrate the posterior distributions of P, R, and F1 and derive an accurate interval estimation of P, R, and F1. Furthermore, we formulate the comparison into a hypothesis testing problem and propose a novel Bayes test. 
The test could directly compute the probabilities of the hypotheses on the basis of the posterior distributions and provide more informative decisions than the existing significance t-tests. Three experiments with regard to NLP chunking tasks are conducted, and the results illustrate the validity of the Bayes test.", "doc_id": "4cd64e8227eab721504123c65a97c3c3", "publication_year": 2019, "sentences": ["direct comparison on point estimation of the precision ( p ) , recall ( r ) , and f1 measure of two natural language processing ( nlp ) models on a common test corpus is unreasonable and results in less replicable conclusions due to a lack of a statistical test .", "however , the existing t - tests in cross - validation ( cv ) for model comparison are inappropriate because the distributions of p , r , f1 are skewed and an interval estimation of p , r , and f1 based on a t - test may exceed [ 0 , 1 ] .", "in this study , we propose to use a block - regularized 3\u00d72 cv ( 3\u00d72 bcv ) in model comparison because it could regularize the difference in certain frequency distributions over linguistic units between training and validation sets and yield stable estimators of p , r , and f1 .", "on the basis of the 3\u00d72 bcv , we calibrate the posterior distributions of p , r , and f1 and derive an accurate interval estimation of p , r , and f1 .", "furthermore , we formulate the comparison into a hypothesis testing problem and propose a novel bayes test .", "the test could directly compute the probabilities of the hypotheses on the basis of the posterior distributions and provide more informative decisions than the existing significance t - tests .", "three experiments with regard to nlp chunking tasks are conducted , and the results illustrate the validity of the bayes test ."], "events": [{"event_type": "RWF", "arguments": [{"text": "lack of a statistical test", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack", "of", "a", "statistical", "test"], 
"offsets": [45, 46, 47, 48, 49]}, {"text": "less replicable conclusions", "nugget_type": "WEA", "argument_type": "Target", "tokens": ["less", "replicable", "conclusions"], "offsets": [39, 40, 41]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [37]}}, {"event_type": "RWF", "arguments": [{"text": "in cross - validation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "cross", "-", "validation"], "offsets": [58, 59, 60, 61]}, {"text": "existing t - tests", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "t", "-", "tests"], "offsets": [54, 55, 56, 57]}, {"text": "inappropriate", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["inappropriate"], "offsets": [69]}], "trigger": {"text": "inappropriate", "tokens": ["inappropriate"], "offsets": [69]}}, {"event_type": "MDS", "arguments": [{"text": "block - regularized 3\u00d72 cv", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["3\u00d72", "bcv"], "offsets": [162, 163]}, {"text": "in model comparison", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "model", "comparison"], "offsets": [124, 125, 126]}, {"text": "regularize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["regularize"], "offsets": [130]}, {"text": "yield", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["yield"], "offsets": [146]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [113]}}, {"event_type": "PUR", "arguments": [{"text": "over linguistic units between training and validation sets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "linguistic", "units", "between", "training", "and", "validation", "sets"], "offsets": [137, 138, 139, 140, 141, 142, 143, 144]}, {"text": "difference in certain frequency distributions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["difference", "in", "certain", "frequency", "distributions"], "offsets": [132, 133, 134, 135, 136]}], 
"trigger": {"text": "regularize", "tokens": ["regularize"], "offsets": [130]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "on the basis of the 3\u00d72 bcv", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "basis", "of", "the", "3\u00d72", "bcv"], "offsets": [157, 158, 159, 160, 161, 162, 163]}, {"text": "posterior distributions of p", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["posterior", "distributions", "of", "p"], "offsets": [168, 169, 170, 171]}, {"text": "posterior distributions r", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["posterior", "distributions", "r"], "offsets": [168, 169, 173]}, {"text": "posterior distributions f1", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["posterior", "distributions", "f1"], "offsets": [168, 169, 176]}], "trigger": {"text": "calibrate", "tokens": ["calibrate"], "offsets": [166]}}, {"event_type": "WKS", "arguments": [{"text": "on the basis of the 3\u00d72 bcv", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "the", "basis", "of", "the", "3\u00d72", "bcv"], "offsets": [157, 158, 159, 160, 161, 162, 163]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [165]}, {"text": "accurate interval estimation of p", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["accurate", "interval", "estimation", "of", "p"], "offsets": [180, 181, 182, 183, 184]}, {"text": "accurate interval estimation r", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["accurate", "interval", "estimation", "r"], "offsets": [180, 181, 182, 186]}, {"text": "accurate interval estimation f1", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["accurate", "interval", "estimation", "f1"], "offsets": [180, 181, 182, 189]}], "trigger": {"text": "derive", "tokens": ["derive"], "offsets": 
[178]}}, {"event_type": "CMP", "arguments": [{"text": "existing significance t - tests", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["existing", "significance", "t", "-", "tests"], "offsets": [233, 234, 235, 236, 237]}, {"text": "more informative decisions", "nugget_type": "STR", "argument_type": "Result", "tokens": ["more", "informative", "decisions"], "offsets": [228, 229, 230]}, {"text": "bayes test", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["bayes", "test"], "offsets": [206, 207]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [227]}}, {"event_type": "PUR", "arguments": [{"text": "stable estimators of p , r , and f1", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["stable", "estimators", "of", "p", ",", "r", ",", "and", "f1"], "offsets": [147, 148, 149, 150, 151, 152, 153, 154, 155]}], "trigger": {"text": "yield", "tokens": ["yield"], "offsets": [146]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [193]}, {"text": "hypothesis testing problem", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hypothesis", "testing", "problem"], "offsets": [199, 200, 201]}], "trigger": {"text": "formulate", "tokens": ["formulate"], "offsets": [194]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [193]}, {"text": "bayes test", "nugget_type": "APP", "argument_type": "Content", "tokens": ["bayes", "test"], "offsets": [206, 207]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [203]}}, {"event_type": "MDS", "arguments": [{"text": "basis of the posterior distributions", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["basis", "of", "the", "posterior", "distributions"], "offsets": [221, 222, 223, 224, 225]}, {"text": "probabilities of the hypotheses", "nugget_type": "FEA", "argument_type": "Target", 
"tokens": ["probabilities", "of", "the", "hypotheses"], "offsets": [215, 216, 217, 218]}], "trigger": {"text": "directly compute", "tokens": ["directly", "compute"], "offsets": [212, 213]}}], "document": ["direct", "comparison", "on", "point", "estimation", "of", "the", "precision", "(", "p", ")", ",", "recall", "(", "r", ")", ",", "and", "f1", "measure", "of", "two", "natural", "language", "processing", "(", "nlp", ")", "models", "on", "a", "common", "test", "corpus", "is", "unreasonable", "and", "results", "in", "less", "replicable", "conclusions", "due", "to", "a", "lack", "of", "a", "statistical", "test", ".", "however", ",", "the", "existing", "t", "-", "tests", "in", "cross", "-", "validation", "(", "cv", ")", "for", "model", "comparison", "are", "inappropriate", "because", "the", "distributions", "of", "p", ",", "r", ",", "f1", "are", "skewed", "and", "an", "interval", "estimation", "of", "p", ",", "r", ",", "and", "f1", "based", "on", "a", "t", "-", "test", "may", "exceed", "[", "0", ",", "1", "]", ".", "in", "this", "study", ",", "we", "propose", "to", "use", "a", "block", "-", "regularized", "3\u00d72", "cv", "(", "3\u00d72", "bcv", ")", "in", "model", "comparison", "because", "it", "could", "regularize", "the", "difference", "in", "certain", "frequency", "distributions", "over", "linguistic", "units", "between", "training", "and", "validation", "sets", "and", "yield", "stable", "estimators", "of", "p", ",", "r", ",", "and", "f1", ".", "on", "the", "basis", "of", "the", "3\u00d72", "bcv", ",", "we", "calibrate", "the", "posterior", "distributions", "of", "p", ",", "r", ",", "and", "f1", "and", "derive", "an", "accurate", "interval", "estimation", "of", "p", ",", "r", ",", "and", "f1", ".", "furthermore", ",", "we", "formulate", "the", "comparison", "into", "a", "hypothesis", "testing", "problem", "and", "propose", "a", "novel", "bayes", "test", ".", "the", "test", "could", "directly", "compute", "the", "probabilities", "of", "the", "hypotheses", "on", 
"the", "basis", "of", "the", "posterior", "distributions", "and", "provide", "more", "informative", "decisions", "than", "the", "existing", "significance", "t", "-", "tests", ".", "three", "experiments", "with", "regard", "to", "nlp", "chunking", "tasks", "are", "conducted", ",", "and", "the", "results", "illustrate", "the", "validity", "of", "the", "bayes", "test", "."]}, {"venue": "ACL", "title": "R\u02c63: Reverse, Retrieve, and Rank for Sarcasm Generation with Commonsense Knowledge", "abstract": "We propose an unsupervised approach for sarcasm generation based on a non-sarcastic input sentence. Our method employs a retrieve-and-edit framework to instantiate two major characteristics of sarcasm: reversal of valence and semantic incongruity with the context, which could include shared commonsense or world knowledge between the speaker and the listener. While prior works on sarcasm generation predominantly focus on context incongruity, we show that combining valence reversal and semantic incongruity based on the commonsense knowledge generates sarcasm of higher quality. 
Human evaluation shows that our system generates sarcasm better than humans 34% of the time, and better than a reinforced hybrid baseline 90% of the time.", "doc_id": "be50c5d1339dabd34855cd1eeca4a35c", "publication_year": 2020, "sentences": ["we propose an unsupervised approach for sarcasm generation based on a non - sarcastic input sentence .", "our method employs a retrieve - and - edit framework to instantiate two major characteristics of sarcasm : reversal of valence and semantic incongruity with the context , which could include shared commonsense or world knowledge between the speaker and the listener .", "while prior works on sarcasm generation predominantly focus on context incongruity , we show that combining valence reversal and semantic incongruity based on the commonsense knowledge generates sarcasm of higher quality .", "human evaluation shows that our system generates sarcasm better than humans 34 % of the time , and better than a reinforced hybrid baseline 90 % of the time ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "unsupervised approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["unsupervised", "approach"], "offsets": [3, 4]}, {"text": "sarcasm generation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sarcasm", "generation"], "offsets": [6, 7]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [1]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [72]}, {"text": "generates", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["generates"], "offsets": [86]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [73]}}, {"event_type": "FAC", "arguments": [{"text": "combining valence reversal and semantic incongruity", "nugget_type": "APP", "argument_type": "Subject", "tokens": 
["combining", "valence", "reversal", "and", "semantic", "incongruity"], "offsets": [75, 76, 77, 78, 79, 80]}, {"text": "sarcasm of higher quality", "nugget_type": "STR", "argument_type": "Object", "tokens": ["sarcasm", "of", "higher", "quality"], "offsets": [87, 88, 89, 90]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [86]}}, {"event_type": "FIN", "arguments": [{"text": "generates", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["generates"], "offsets": [98]}, {"text": "better", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["better"], "offsets": [110]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [94]}}, {"event_type": "CMP", "arguments": [{"text": "unsupervised approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "approach"], "offsets": [3, 4]}, {"text": "humans", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["humans"], "offsets": [102]}, {"text": "sarcasm", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["sarcasm"], "offsets": [99]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [100]}, {"text": "34 % of the time", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["34", "%", "of", "the", "time"], "offsets": [103, 104, 105, 106, 107]}], "trigger": {"text": "generates", "tokens": ["generates"], "offsets": [98]}}, {"event_type": "CMP", "arguments": [{"text": "unsupervised approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["unsupervised", "approach"], "offsets": [3, 4]}, {"text": "reinforced hybrid baseline", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["reinforced", "hybrid", "baseline"], "offsets": [113, 114, 115]}, {"text": "better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["better"], "offsets": [110]}], "trigger": {"text": "better", "tokens": ["better"], "offsets": [110]}}, {"event_type": "WKS", "arguments": 
[{"text": "instantiate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["instantiate"], "offsets": [28]}, {"text": "retrieve - framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["retrieve", "-", "framework"], "offsets": [21, 22, 26]}, {"text": "- edit framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["-", "edit", "framework"], "offsets": [24, 25, 26]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [19]}}, {"event_type": "PUR", "arguments": [{"text": "two major characteristics of sarcasm", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["two", "major", "characteristics", "of", "sarcasm"], "offsets": [29, 30, 31, 32, 33]}], "trigger": {"text": "instantiate", "tokens": ["instantiate"], "offsets": [28]}}, {"event_type": "MDS", "arguments": [{"text": "instantiate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["instantiate"], "offsets": [28]}, {"text": "retrieve - framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["retrieve", "-", "framework"], "offsets": [21, 22, 26]}, {"text": "- edit framework", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["-", "edit", "framework"], "offsets": [24, 25, 26]}], "trigger": {"text": "employs", "tokens": ["employs"], "offsets": [19]}}, {"event_type": "PUR", "arguments": [{"text": "reversal of valence", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["reversal", "of", "valence"], "offsets": [35, 36, 37]}, {"text": "semantic incongruity with the context", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["semantic", "incongruity", "with", "the", "context"], "offsets": [39, 40, 41, 42, 43]}], "trigger": {"text": "instantiate", "tokens": ["instantiate"], "offsets": [28]}}], "document": ["we", "propose", "an", "unsupervised", "approach", "for", "sarcasm", "generation", "based", "on", "a", "non", "-", "sarcastic", "input", "sentence", ".", "our", "method", "employs", "a", 
"retrieve", "-", "and", "-", "edit", "framework", "to", "instantiate", "two", "major", "characteristics", "of", "sarcasm", ":", "reversal", "of", "valence", "and", "semantic", "incongruity", "with", "the", "context", ",", "which", "could", "include", "shared", "commonsense", "or", "world", "knowledge", "between", "the", "speaker", "and", "the", "listener", ".", "while", "prior", "works", "on", "sarcasm", "generation", "predominantly", "focus", "on", "context", "incongruity", ",", "we", "show", "that", "combining", "valence", "reversal", "and", "semantic", "incongruity", "based", "on", "the", "commonsense", "knowledge", "generates", "sarcasm", "of", "higher", "quality", ".", "human", "evaluation", "shows", "that", "our", "system", "generates", "sarcasm", "better", "than", "humans", "34", "%", "of", "the", "time", ",", "and", "better", "than", "a", "reinforced", "hybrid", "baseline", "90", "%", "of", "the", "time", "."]}, {"venue": "ACL", "title": "Incorporating Hierarchy into Text Encoder: a Contrastive Learning Approach for Hierarchical Text Classification", "abstract": "Hierarchical text classification is a challenging subtask of multi-label classification due to its complex label hierarchy. Existing methods encode text and label hierarchy separately and mix their representations for classification, where the hierarchy remains unchanged for all input text. Instead of modeling them separately, in this work, we propose Hierarchy-guided Contrastive Learning (HGCLR) to directly embed the hierarchy into a text encoder. During training, HGCLR constructs positive samples for input text under the guidance of the label hierarchy. By pulling together the input text and its positive sample, the text encoder can learn to generate the hierarchy-aware text representation independently. Therefore, after training, the HGCLR enhanced text encoder can dispense with the redundant hierarchy. 
Extensive experiments on three benchmark datasets verify the effectiveness of HGCLR.", "doc_id": "8ca3fe510ad2a635d2ac6bdb82fc7b6f", "publication_year": 2022, "sentences": ["hierarchical text classification is a challenging subtask of multi - label classification due to its complex label hierarchy .", "existing methods encode text and label hierarchy separately and mix their representations for classification , where the hierarchy remains unchanged for all input text .", "instead of modeling them separately , in this work , we propose hierarchy - guided contrastive learning ( hgclr ) to directly embed the hierarchy into a text encoder .", "during training , hgclr constructs positive samples for input text under the guidance of the label hierarchy .", "by pulling together the input text and its positive sample , the text encoder can learn to generate the hierarchy - aware text representation independently .", "therefore , after training , the hgclr enhanced text encoder can dispense with the redundant hierarchy .", "extensive experiments on three benchmark datasets verify the effectiveness of hgclr ."], "events": [{"event_type": "ITT", "arguments": [{"text": "hierarchical text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hierarchical", "text", "classification"], "offsets": [0, 1, 2]}], "trigger": {"text": "subtask", "tokens": ["subtask"], "offsets": [6]}}, {"event_type": "RWS", "arguments": [{"text": "separately", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["separately"], "offsets": [26]}, {"text": "existing methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "methods"], "offsets": [19, 20]}, {"text": "text hierarchy", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["text", "hierarchy"], "offsets": [22, 25]}, {"text": "label hierarchy", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["label", "hierarchy"], "offsets": [24, 25]}], "trigger": 
{"text": "encode", "tokens": ["encode"], "offsets": [21]}}, {"event_type": "RWS", "arguments": [{"text": "existing methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["existing", "methods"], "offsets": [19, 20]}, {"text": "their representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["text", "label", "representations"], "offsets": [22, 24, 30]}, {"text": "classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["classification"], "offsets": [32]}], "trigger": {"text": "mix", "tokens": ["mix"], "offsets": [28]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [54]}, {"text": "hierarchy - guided contrastive learning", "nugget_type": "APP", "argument_type": "Content", "tokens": ["hierarchy", "-", "guided", "contrastive", "learning"], "offsets": [56, 57, 58, 59, 60]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [55]}}, {"event_type": "MDS", "arguments": [{"text": "positive samples", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["positive", "samples"], "offsets": [79, 80]}, {"text": "under the guidance of the label hierarchy", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "the", "guidance", "of", "the", "label", "hierarchy"], "offsets": [84, 85, 86, 87, 88, 89, 90]}, {"text": "hierarchy - guided contrastive learning", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["hierarchy", "-", "guided", "contrastive", "learning"], "offsets": [56, 57, 58, 59, 60]}, {"text": "input text", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["input", "text"], "offsets": [82, 83]}], "trigger": {"text": "constructs", "tokens": ["constructs"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "text encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["text", "encoder"], "offsets": [104, 105]}, {"text": 
"hierarchy - aware text representation", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hierarchy", "-", "aware", "text", "representation"], "offsets": [111, 112, 113, 114, 115]}, {"text": "by pulling together the input text and its positive sample", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["by", "pulling", "together", "the", "input", "text", "and", "input", "text", "positive", "sample"], "offsets": [92, 93, 94, 95, 96, 97, 98, 96, 97, 100, 101]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [109]}}, {"event_type": "FAC", "arguments": [{"text": "hgclr enhanced text encoder", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["hierarchy", "-", "guided", "contrastive", "learning", "enhanced", "text", "encoder"], "offsets": [56, 57, 58, 59, 60, 125, 126, 127]}, {"text": "after training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["after", "training"], "offsets": [120, 121]}, {"text": "redundant hierarchy", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["redundant", "hierarchy"], "offsets": [132, 133]}], "trigger": {"text": "dispense", "tokens": ["dispense"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "three benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["three", "benchmark", "datasets"], "offsets": [138, 139, 140]}, {"text": "effectiveness of hgclr", "nugget_type": "STR", "argument_type": "Object", "tokens": ["effectiveness", "of", "hierarchy", "-", "guided", "contrastive", "learning"], "offsets": [143, 144, 56, 57, 58, 59, 60]}], "trigger": {"text": "verify", "tokens": ["verify"], "offsets": [141]}}, {"event_type": "MDS", "arguments": [{"text": "hierarchy", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["hierarchy"], "offsets": [68]}, {"text": "text encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["text", "encoder"], "offsets": [71, 72]}], "trigger": 
{"text": "directly embed", "tokens": ["directly", "embed"], "offsets": [65, 66]}}], "document": ["hierarchical", "text", "classification", "is", "a", "challenging", "subtask", "of", "multi", "-", "label", "classification", "due", "to", "its", "complex", "label", "hierarchy", ".", "existing", "methods", "encode", "text", "and", "label", "hierarchy", "separately", "and", "mix", "their", "representations", "for", "classification", ",", "where", "the", "hierarchy", "remains", "unchanged", "for", "all", "input", "text", ".", "instead", "of", "modeling", "them", "separately", ",", "in", "this", "work", ",", "we", "propose", "hierarchy", "-", "guided", "contrastive", "learning", "(", "hgclr", ")", "to", "directly", "embed", "the", "hierarchy", "into", "a", "text", "encoder", ".", "during", "training", ",", "hgclr", "constructs", "positive", "samples", "for", "input", "text", "under", "the", "guidance", "of", "the", "label", "hierarchy", ".", "by", "pulling", "together", "the", "input", "text", "and", "its", "positive", "sample", ",", "the", "text", "encoder", "can", "learn", "to", "generate", "the", "hierarchy", "-", "aware", "text", "representation", "independently", ".", "therefore", ",", "after", "training", ",", "the", "hgclr", "enhanced", "text", "encoder", "can", "dispense", "with", "the", "redundant", "hierarchy", ".", "extensive", "experiments", "on", "three", "benchmark", "datasets", "verify", "the", "effectiveness", "of", "hgclr", "."]}, {"venue": "ACL", "title": "Measuring Forecasting Skill from Text", "abstract": "People vary in their ability to make accurate predictions about the future. Prior studies have shown that some individuals can predict the outcome of future events with consistently better accuracy. This leads to a natural question: what makes some forecasters better than others? In this paper we explore connections between the language people use to describe their predictions and their forecasting skill. 
Datasets from two different forecasting domains are explored: (1) geopolitical forecasts from Good Judgment Open, an online prediction forum and (2) a corpus of company earnings forecasts made by financial analysts. We present a number of linguistic metrics which are computed over text associated with people\u2019s predictions about the future including: uncertainty, readability, and emotion. By studying linguistic factors associated with predictions, we are able to shed some light on the approach taken by skilled forecasters. Furthermore, we demonstrate that it is possible to accurately predict forecasting skill using a model that is based solely on language. This could potentially be useful for identifying accurate predictions or potentially skilled forecasters earlier.", "doc_id": "146ebe812bebc478cfed6f391d052ad8", "publication_year": 2020, "sentences": ["people vary in their ability to make accurate predictions about the future .", "prior studies have shown that some individuals can predict the outcome of future events with consistently better accuracy .", "this leads to a natural question : what makes some forecasters better than others ?", "in this paper we explore connections between the language people use to describe their predictions and their forecasting skill .", "datasets from two different forecasting domains are explored : ( 1 ) geopolitical forecasts from good judgment open , an online prediction forum and ( 2 ) a corpus of company earnings forecasts made by financial analysts .", "we present a number of linguistic metrics which are computed over text associated with people \u2019 s predictions about the future including : uncertainty , readability , and emotion .", "by studying linguistic factors associated with predictions , we are able to shed some light on the approach taken by skilled forecasters .", "furthermore , we demonstrate that it is possible to accurately predict forecasting skill using a model that is based solely on language .", 
"this could potentially be useful for identifying accurate predictions or potentially skilled forecasters earlier ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [50]}, {"text": "connections between the language people use to describe their predictions", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["connections", "between", "the", "language", "people", "use", "to", "describe", "people", "predictions"], "offsets": [52, 53, 54, 55, 56, 57, 58, 59, 56, 61]}, {"text": "their forecasting skill", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["people", "forecasting", "skill"], "offsets": [56, 64, 65]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [51]}}, {"event_type": "WKS", "arguments": [{"text": "datasets from two different forecasting domains", "nugget_type": "DST", "argument_type": "Content", "tokens": ["datasets", "from", "two", "different", "forecasting", "domains"], "offsets": [67, 68, 69, 70, 71, 72]}], "trigger": {"text": "explored", "tokens": ["explored"], "offsets": [74]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [105]}, {"text": "linguistic metrics", "nugget_type": "APP", "argument_type": "Content", "tokens": ["linguistic", "metrics"], "offsets": [110, 111]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [106]}}, {"event_type": "MDS", "arguments": [{"text": "text associated with people \u2019 s predictions about the future", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["text", "associated", "with", "people", "\u2019", "s", "predictions", "about", "the", "future"], "offsets": [116, 117, 118, 119, 120, 121, 122, 123, 124, 125]}], "trigger": {"text": "computed", "tokens": ["computed"], "offsets": [114]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", 
"argument_type": "Finder", "tokens": ["we"], "offsets": [160]}, {"text": "accurately predict", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["accurately", "predict"], "offsets": [167, 168]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "model that is based solely on language", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model", "that", "is", "based", "solely", "on", "language"], "offsets": [173, 174, 175, 176, 177, 178, 179]}, {"text": "forecasting skill", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["forecasting", "skill"], "offsets": [169, 170]}], "trigger": {"text": "accurately predict", "tokens": ["accurately", "predict"], "offsets": [167, 168]}}], "document": ["people", "vary", "in", "their", "ability", "to", "make", "accurate", "predictions", "about", "the", "future", ".", "prior", "studies", "have", "shown", "that", "some", "individuals", "can", "predict", "the", "outcome", "of", "future", "events", "with", "consistently", "better", "accuracy", ".", "this", "leads", "to", "a", "natural", "question", ":", "what", "makes", "some", "forecasters", "better", "than", "others", "?", "in", "this", "paper", "we", "explore", "connections", "between", "the", "language", "people", "use", "to", "describe", "their", "predictions", "and", "their", "forecasting", "skill", ".", "datasets", "from", "two", "different", "forecasting", "domains", "are", "explored", ":", "(", "1", ")", "geopolitical", "forecasts", "from", "good", "judgment", "open", ",", "an", "online", "prediction", "forum", "and", "(", "2", ")", "a", "corpus", "of", "company", "earnings", "forecasts", "made", "by", "financial", "analysts", ".", "we", "present", "a", "number", "of", "linguistic", "metrics", "which", "are", "computed", "over", "text", "associated", "with", "people", "\u2019", "s", "predictions", "about", "the", "future", "including", ":", "uncertainty", ",", 
"readability", ",", "and", "emotion", ".", "by", "studying", "linguistic", "factors", "associated", "with", "predictions", ",", "we", "are", "able", "to", "shed", "some", "light", "on", "the", "approach", "taken", "by", "skilled", "forecasters", ".", "furthermore", ",", "we", "demonstrate", "that", "it", "is", "possible", "to", "accurately", "predict", "forecasting", "skill", "using", "a", "model", "that", "is", "based", "solely", "on", "language", ".", "this", "could", "potentially", "be", "useful", "for", "identifying", "accurate", "predictions", "or", "potentially", "skilled", "forecasters", "earlier", "."]}, {"venue": "ACL", "title": "Good for Misconceived Reasons: An Empirical Revisiting on the Need for Visual Context in Multimodal Machine Translation", "abstract": "A neural multimodal machine translation (MMT) system is one that aims to perform better translation by extending conventional text-only translation models with multimodal information. Many recent studies report improvements when equipping their models with the multimodal module, despite the controversy of whether such improvements indeed come from the multimodal part. We revisit the contribution of multimodal information in MMT by devising two interpretable MMT models. To our surprise, although our models replicate similar gains as recently developed multimodal-integrated systems achieved, our models learn to ignore the multimodal information. Upon further investigation, we discover that the improvements achieved by the multimodal models over text-only counterparts are in fact results of the regularization effect. 
We report empirical findings that highlight the importance of MMT models\u2019 interpretability, and discuss how our findings will benefit future research.", "doc_id": "dd215548506a1ae5fcecf784824da8fa", "publication_year": 2021, "sentences": ["a neural multimodal machine translation ( mmt ) system is one that aims to perform better translation by extending conventional text - only translation models with multimodal information .", "many recent studies report improvements when equipping their models with the multimodal module , despite the controversy of whether such improvements indeed come from the multimodal part .", "we revisit the contribution of multimodal information in mmt by devising two interpretable mmt models .", "to our surprise , although our models replicate similar gains as recently developed multimodal - integrated systems achieved , our models learn to ignore the multimodal information .", "upon further investigation , we discover that the improvements achieved by the multimodal models over text - only counterparts are in fact results of the regularization effect .", "we report empirical findings that highlight the importance of", "mmt models \u2019 interpretability , and discuss how our findings will benefit future research ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural multimodal machine translation ( mmt ) system", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["neural", "multimodal", "machine", "translation", "system"], "offsets": [1, 2, 3, 4, 8]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [14]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "two interpretable mmt models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "interpretable", "multimodal", "machine", "translation", "models"], "offsets": [68, 69, 2, 3, 4, 71]}, {"text": "revisit", "nugget_type": "E-PUR", 
"argument_type": "Target", "tokens": ["revisit"], "offsets": [58]}], "trigger": {"text": "devising", "tokens": ["devising"], "offsets": [67]}}, {"event_type": "PUR", "arguments": [{"text": "contribution of multimodal information", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["contribution", "of", "multimodal", "information"], "offsets": [60, 61, 62, 63]}, {"text": "in mmt", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "multimodal", "machine", "translation"], "offsets": [64, 2, 3, 4]}], "trigger": {"text": "revisit", "tokens": ["revisit"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "similar gains", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["similar", "gains"], "offsets": [81, 82]}, {"text": "as recently developed multimodal - integrated systems achieved", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["as", "recently", "developed", "multimodal", "-", "integrated", "systems", "achieved"], "offsets": [83, 84, 85, 86, 87, 88, 89, 90]}], "trigger": {"text": "replicate", "tokens": ["replicate"], "offsets": [80]}}, {"event_type": "WKS", "arguments": [{"text": "multimodal information", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["multimodal", "information"], "offsets": [98, 99]}], "trigger": {"text": "ignore", "tokens": ["ignore"], "offsets": [96]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [105]}, {"text": "results of the regularization effect", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["results", "of", "the", "regularization", "effect"], "offsets": [123, 124, 125, 126, 127]}], "trigger": {"text": "discover", "tokens": ["discover"], "offsets": [106]}}, {"event_type": "FAC", "arguments": [{"text": "improvements", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["improvements"], "offsets": [109]}, {"text": "achieved by the multimodal models over 
text - only counterparts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["achieved", "by", "the", "multimodal", "models", "over", "text", "-", "only", "counterparts"], "offsets": [110, 111, 112, 113, 114, 115, 116, 117, 118, 119]}], "trigger": {"text": "results of the regularization effect", "tokens": ["results", "of", "the", "regularization", "effect"], "offsets": [123, 124, 125, 126, 127]}}], "document": ["a", "neural", "multimodal", "machine", "translation", "(", "mmt", ")", "system", "is", "one", "that", "aims", "to", "perform", "better", "translation", "by", "extending", "conventional", "text", "-", "only", "translation", "models", "with", "multimodal", "information", ".", "many", "recent", "studies", "report", "improvements", "when", "equipping", "their", "models", "with", "the", "multimodal", "module", ",", "despite", "the", "controversy", "of", "whether", "such", "improvements", "indeed", "come", "from", "the", "multimodal", "part", ".", "we", "revisit", "the", "contribution", "of", "multimodal", "information", "in", "mmt", "by", "devising", "two", "interpretable", "mmt", "models", ".", "to", "our", "surprise", ",", "although", "our", "models", "replicate", "similar", "gains", "as", "recently", "developed", "multimodal", "-", "integrated", "systems", "achieved", ",", "our", "models", "learn", "to", "ignore", "the", "multimodal", "information", ".", "upon", "further", "investigation", ",", "we", "discover", "that", "the", "improvements", "achieved", "by", "the", "multimodal", "models", "over", "text", "-", "only", "counterparts", "are", "in", "fact", "results", "of", "the", "regularization", "effect", ".", "we", "report", "empirical", "findings", "that", "highlight", "the", "importance", "of", "mmt", "models", "\u2019", "interpretability", ",", "and", "discuss", "how", "our", "findings", "will", "benefit", "future", "research", "."]}, {"venue": "ACL", "title": "Keep Meeting Summaries on Topic: Abstractive Multi-Modal Meeting Summarization", 
"abstract": "Transcripts of natural, multi-person meetings differ significantly from documents like news articles, which can make Natural Language Generation models for generating summaries unfocused. We develop an abstractive meeting summarizer from both videos and audios of meeting recordings. Specifically, we propose a multi-modal hierarchical attention across three levels: segment, utterance and word. To narrow down the focus into topically-relevant segments, we jointly model topic segmentation and summarization. In addition to traditional text features, we introduce new multi-modal features derived from visual focus of attention, based on the assumption that the utterance is more important if the speaker receives more attention. Experiments show that our model significantly outperforms the state-of-the-art with both BLEU and ROUGE measures.", "doc_id": "f984bf90967030ddca9f5ebdb7ebfcb3", "publication_year": 2019, "sentences": ["transcripts of natural , multi - person meetings differ significantly from documents like news articles , which can make natural language generation models for generating summaries unfocused .", "we develop an abstractive meeting summarizer from both videos and audios of meeting recordings .", "specifically , we propose a multi - modal hierarchical attention across three levels : segment , utterance and word .", "to narrow down the focus into topically - relevant segments , we jointly model topic segmentation and summarization .", "in addition to traditional text features , we introduce new multi - modal features derived from visual focus of attention , based on the assumption that the utterance is more important if the speaker receives more attention .", "experiments show that our model significantly outperforms the state - of - the - art with both bleu and rouge measures ."], "events": [{"event_type": "ITT", "arguments": [{"text": "transcripts of natural , multi - person meetings", "nugget_type": "TAK", "argument_type": "Target", 
"tokens": ["transcripts", "of", "natural", ",", "multi", "-", "person", "meetings"], "offsets": [0, 1, 2, 3, 4, 5, 6, 7]}], "trigger": {"text": "differ", "tokens": ["differ"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "natural language generation models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["natural", "language", "generation", "models"], "offsets": [19, 20, 21, 22]}, {"text": "generating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["generating"], "offsets": [24]}, {"text": "unfocused", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unfocused"], "offsets": [26]}], "trigger": {"text": "make", "tokens": ["make"], "offsets": [18]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [28]}, {"text": "abstractive meeting summarizer", "nugget_type": "APP", "argument_type": "Content", "tokens": ["abstractive", "meeting", "summarizer"], "offsets": [31, 32, 33]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [29]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [45]}, {"text": "multi - modal hierarchical attention", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["multi", "-", "modal", "hierarchical", "attention"], "offsets": [48, 49, 50, 51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [46]}}, {"event_type": "MDS", "arguments": [{"text": "narrow", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["narrow"], "offsets": [64]}, {"text": "topic segmentation", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["topic", "segmentation"], "offsets": [77, 78]}, {"text": "topic summarization", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["topic", "summarization"], "offsets": [77, 80]}], "trigger": {"text": "model", "tokens": ["model"], 
"offsets": [76]}}, {"event_type": "PUR", "arguments": [{"text": "focus", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["focus"], "offsets": [67]}, {"text": "into topically - relevant segments", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["into", "topically", "-", "relevant", "segments"], "offsets": [68, 69, 70, 71, 72]}], "trigger": {"text": "narrow", "tokens": ["narrow"], "offsets": [64]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [89]}, {"text": "multi - modal features", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["multi", "-", "modal", "features"], "offsets": [92, 93, 94, 95]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [90]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [126]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [121]}}, {"event_type": "CMP", "arguments": [{"text": "abstractive meeting summarizer", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["abstractive", "meeting", "summarizer"], "offsets": [31, 32, 33]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [125]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [126]}, {"text": "state - of - the - art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art"], "offsets": [128, 129, 130, 131, 132, 133, 134]}, {"text": "bleu measures", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bleu", "measures"], "offsets": [137, 140]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [126]}}, {"event_type": "PUR", "arguments": [{"text": "summaries", "nugget_type": "TAK", "argument_type": "Aim", "tokens": 
["summaries"], "offsets": [25]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [24]}}], "document": ["transcripts", "of", "natural", ",", "multi", "-", "person", "meetings", "differ", "significantly", "from", "documents", "like", "news", "articles", ",", "which", "can", "make", "natural", "language", "generation", "models", "for", "generating", "summaries", "unfocused", ".", "we", "develop", "an", "abstractive", "meeting", "summarizer", "from", "both", "videos", "and", "audios", "of", "meeting", "recordings", ".", "specifically", ",", "we", "propose", "a", "multi", "-", "modal", "hierarchical", "attention", "across", "three", "levels", ":", "segment", ",", "utterance", "and", "word", ".", "to", "narrow", "down", "the", "focus", "into", "topically", "-", "relevant", "segments", ",", "we", "jointly", "model", "topic", "segmentation", "and", "summarization", ".", "in", "addition", "to", "traditional", "text", "features", ",", "we", "introduce", "new", "multi", "-", "modal", "features", "derived", "from", "visual", "focus", "of", "attention", ",", "based", "on", "the", "assumption", "that", "the", "utterance", "is", "more", "important", "if", "the", "speaker", "receives", "more", "attention", ".", "experiments", "show", "that", "our", "model", "significantly", "outperforms", "the", "state", "-", "of", "-", "the", "-", "art", "with", "both", "bleu", "and", "rouge", "measures", "."]}, {"venue": "ACL", "title": "Generated Knowledge Prompting for Commonsense Reasoning", "abstract": "It remains an open question whether incorporating external knowledge benefits commonsense reasoning while maintaining the flexibility of pretrained sequence models. To investigate this question, we develop generated knowledge prompting, which consists of generating knowledge from a language model, then providing the knowledge as additional input when answering a question. 
Our method does not require task-specific supervision for knowledge integration, or access to a structured knowledge base, yet it improves performance of large-scale, state-of-the-art models on four commonsense reasoning tasks, achieving state-of-the-art results on numerical commonsense (NumerSense), general commonsense (CommonsenseQA 2.0), and scientific commonsense (QASC) benchmarks. Generated knowledge prompting highlights large-scale language models as flexible sources of external knowledge for improving commonsense reasoning.Our code is available at github.com/liujch1998/GKP", "doc_id": "138821e79679a3cec75abcab6cae2095", "publication_year": 2022, "sentences": ["it remains an open question whether incorporating external knowledge benefits commonsense reasoning while maintaining the flexibility of pretrained sequence models .", "to investigate this question , we develop generated knowledge prompting , which consists of generating knowledge from a language model , then providing the knowledge as additional input when answering a question .", "our method does not require task - specific supervision for knowledge integration , or access to a structured knowledge base , yet it improves performance of large - scale , state - of - the - art models on four commonsense reasoning tasks , achieving state - of - the - art results on numerical commonsense ( numersense ) , general commonsense ( commonsenseqa 2 . 0 ) , and scientific commonsense ( qasc ) benchmarks .", "generated knowledge prompting highlights large - scale language models as flexible sources of external knowledge for improving commonsense reasoning .", "our code is available at github . 
com / liujch1998 / gkp"], "events": [{"event_type": "ITT", "arguments": [{"text": "commonsense reasoning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["commonsense", "reasoning"], "offsets": [10, 11]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [6]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [26]}, {"text": "generated knowledge prompting", "nugget_type": "APP", "argument_type": "Content", "tokens": ["generated", "knowledge", "prompting"], "offsets": [28, 29, 30]}, {"text": "investigate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["investigate"], "offsets": [22]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [27]}}, {"event_type": "PUR", "arguments": [{"text": "question", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["question"], "offsets": [24]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [22]}}, {"event_type": "MDS", "arguments": [{"text": "knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["knowledge"], "offsets": [36]}, {"text": "language model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["language", "model"], "offsets": [39, 40]}], "trigger": {"text": "generating", "tokens": ["generating"], "offsets": [35]}}, {"event_type": "MDS", "arguments": [{"text": "when answering a question", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "answering", "a", "question"], "offsets": [49, 50, 51, 52]}, {"text": "knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["knowledge"], "offsets": [45]}, {"text": "additional input", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["additional", "input"], "offsets": [47, 48]}], "trigger": {"text": "providing", "tokens": ["providing"], "offsets": [43]}}, {"event_type": "FAC", "arguments": [{"text": 
"generated knowledge prompting", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["generated", "knowledge", "prompting"], "offsets": [28, 29, 30]}, {"text": "performance of large - scale , state - of - the - art models", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "large", "-", "scale", ",", "state", "-", "of", "-", "the", "-", "art", "models"], "offsets": [78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91]}, {"text": "on four commonsense reasoning tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "four", "commonsense", "reasoning", "tasks"], "offsets": [92, 93, 94, 95, 96]}], "trigger": {"text": "improves", "tokens": ["improves"], "offsets": [77]}}, {"event_type": "FAC", "arguments": [{"text": "generated knowledge prompting", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["generated", "knowledge", "prompting"], "offsets": [28, 29, 30]}, {"text": "state - of - the - art results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "results"], "offsets": [99, 100, 101, 102, 103, 104, 105, 106]}, {"text": "on numerical commonsense ( numersense ) , general commonsense ( commonsenseqa 2 . 
0 ) , and scientific commonsense ( qasc ) benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "numerical", "commonsense", ",", "general", "commonsense", ",", "and", "scientific", "commonsense", "benchmarks"], "offsets": [107, 108, 109, 113, 114, 115, 122, 123, 124, 125, 129]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [98]}}, {"event_type": "FAC", "arguments": [{"text": "generated knowledge prompting", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["generated", "knowledge", "prompting"], "offsets": [131, 132, 133]}, {"text": "large - scale language models", "nugget_type": "APP", "argument_type": "Object", "tokens": ["large", "-", "scale", "language", "models"], "offsets": [135, 136, 137, 138, 139]}, {"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [147]}], "trigger": {"text": "highlights", "tokens": ["highlights"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "commonsense reasoning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["commonsense", "reasoning"], "offsets": [148, 149]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [147]}}], "document": ["it", "remains", "an", "open", "question", "whether", "incorporating", "external", "knowledge", "benefits", "commonsense", "reasoning", "while", "maintaining", "the", "flexibility", "of", "pretrained", "sequence", "models", ".", "to", "investigate", "this", "question", ",", "we", "develop", "generated", "knowledge", "prompting", ",", "which", "consists", "of", "generating", "knowledge", "from", "a", "language", "model", ",", "then", "providing", "the", "knowledge", "as", "additional", "input", "when", "answering", "a", "question", ".", "our", "method", "does", "not", "require", "task", "-", "specific", "supervision", "for", "knowledge", "integration", ",", "or", "access", "to", "a", "structured", "knowledge", "base", ",", "yet", "it", 
"improves", "performance", "of", "large", "-", "scale", ",", "state", "-", "of", "-", "the", "-", "art", "models", "on", "four", "commonsense", "reasoning", "tasks", ",", "achieving", "state", "-", "of", "-", "the", "-", "art", "results", "on", "numerical", "commonsense", "(", "numersense", ")", ",", "general", "commonsense", "(", "commonsenseqa", "2", ".", "0", ")", ",", "and", "scientific", "commonsense", "(", "qasc", ")", "benchmarks", ".", "generated", "knowledge", "prompting", "highlights", "large", "-", "scale", "language", "models", "as", "flexible", "sources", "of", "external", "knowledge", "for", "improving", "commonsense", "reasoning", ".", "our", "code", "is", "available", "at", "github", ".", "com", "/", "liujch1998", "/", "gkp"]}, {"venue": "ACL", "title": "Spying on Your Neighbors: Fine-grained Probing of Contextual Embeddings for Information about Surrounding Words", "abstract": "Although models using contextual word embeddings have achieved state-of-the-art results on a host of NLP tasks, little is known about exactly what information these embeddings encode about the context words that they are understood to reflect. To address this question, we introduce a suite of probing tasks that enable fine-grained testing of contextual embeddings for encoding of information about surrounding words. We apply these tasks to examine the popular BERT, ELMo and GPT contextual encoders, and find that each of our tested information types is indeed encoded as contextual information across tokens, often with near-perfect recoverability\u2014but the encoders vary in which features they distribute to which tokens, how nuanced their distributions are, and how robust the encoding of each feature is to distance. 
We discuss implications of these results for how different types of models break down and prioritize word-level context information when constructing token embeddings.", "doc_id": "bae145357b9e541c87f0ed2eea801618", "publication_year": 2020, "sentences": ["although models using contextual word embeddings have achieved state - of - the - art results on a host of nlp tasks , little is known about exactly what information these embeddings encode about the context words that they are understood to reflect .", "to address this question , we introduce a suite of probing tasks that enable fine - grained testing of contextual embeddings for encoding of information about surrounding words .", "we apply these tasks to examine the popular bert , elmo and gpt contextual encoders , and find that each of our tested information types is indeed encoded as contextual information across tokens , often with near - perfect recoverability \u2014 but the encoders vary in which features they distribute to which tokens , how nuanced their distributions are , and how robust the encoding of each feature is to distance .", "we discuss implications of these results for how different types of models break down and prioritize word - level context information when constructing token embeddings ."], "events": [{"event_type": "ITT", "arguments": [{"text": "contextual word embeddings", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["contextual", "word", "embeddings"], "offsets": [3, 4, 5]}], "trigger": {"text": "achieved", "tokens": ["achieved"], "offsets": [7]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [49]}, {"text": "suite of probing tasks", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["suite", "of", "probing", "tasks"], "offsets": [52, 53, 54, 55]}, {"text": "enable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["enable"], "offsets": [57]}, {"text": "encoding", 
"nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["encoding"], "offsets": [66]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [50]}}, {"event_type": "PUR", "arguments": [{"text": "fine - grained testing of contextual embeddings", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["fine", "-", "grained", "testing", "of", "contextual", "embeddings"], "offsets": [58, 59, 60, 61, 62, 63, 64]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [57]}}, {"event_type": "PUR", "arguments": [{"text": "surrounding words", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["surrounding", "words"], "offsets": [70, 71]}], "trigger": {"text": "encoding", "tokens": ["encoding"], "offsets": [66]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [73]}, {"text": "popular bert encoders", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["popular", "bert", "encoders"], "offsets": [80, 81, 87]}, {"text": "elmo encoders", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["elmo", "encoders"], "offsets": [83, 87]}, {"text": "gpt contextual encoders", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["gpt", "contextual", "encoders"], "offsets": [85, 86, 87]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [78]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [73]}, {"text": "indeed encoded", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["indeed", "encoded"], "offsets": [99, 100]}, {"text": "vary in", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["vary", "in"], "offsets": [117, 118]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [90]}}, {"event_type": "FAC", "arguments": [{"text": "each of our tested information types", "nugget_type": "FEA", "argument_type": 
"Subject", "tokens": ["each", "of", "our", "tested", "information", "types"], "offsets": [92, 93, 94, 95, 96, 97]}, {"text": "contextual information across tokens", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["contextual", "information", "across", "tokens"], "offsets": [102, 103, 104, 105]}], "trigger": {"text": "indeed encoded", "tokens": ["indeed", "encoded"], "offsets": [99, 100]}}, {"event_type": "FAC", "arguments": [{"text": "encoders", "nugget_type": "MOD", "argument_type": "Subject", "tokens": ["encoders"], "offsets": [116]}, {"text": "distributions", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["distributions"], "offsets": [130]}, {"text": "features", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["features"], "offsets": [120]}, {"text": "they distribute to which tokens", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["they", "distribute", "to", "which", "tokens"], "offsets": [121, 122, 123, 124, 125]}, {"text": "encoding of each feature", "nugget_type": "MOD", "argument_type": "Object", "tokens": ["encoding", "of", "each", "feature"], "offsets": [137, 138, 139, 140]}], "trigger": {"text": "vary in", "tokens": ["vary", "in"], "offsets": [117, 118]}}, {"event_type": "MDS", "arguments": [{"text": "different types of models", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["different", "types", "of", "models"], "offsets": [153, 154, 155, 156]}, {"text": "word - level context information", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["word", "-", "level", "context", "information"], "offsets": [161, 162, 163, 164, 165]}, {"text": "when constructing token embeddings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "constructing", "token", "embeddings"], "offsets": [166, 167, 168, 169]}], "trigger": {"text": "break down and prioritize", "tokens": ["break", "down", "and", "prioritize"], "offsets": [157, 158, 159, 160]}}], "document": 
["although", "models", "using", "contextual", "word", "embeddings", "have", "achieved", "state", "-", "of", "-", "the", "-", "art", "results", "on", "a", "host", "of", "nlp", "tasks", ",", "little", "is", "known", "about", "exactly", "what", "information", "these", "embeddings", "encode", "about", "the", "context", "words", "that", "they", "are", "understood", "to", "reflect", ".", "to", "address", "this", "question", ",", "we", "introduce", "a", "suite", "of", "probing", "tasks", "that", "enable", "fine", "-", "grained", "testing", "of", "contextual", "embeddings", "for", "encoding", "of", "information", "about", "surrounding", "words", ".", "we", "apply", "these", "tasks", "to", "examine", "the", "popular", "bert", ",", "elmo", "and", "gpt", "contextual", "encoders", ",", "and", "find", "that", "each", "of", "our", "tested", "information", "types", "is", "indeed", "encoded", "as", "contextual", "information", "across", "tokens", ",", "often", "with", "near", "-", "perfect", "recoverability", "\u2014", "but", "the", "encoders", "vary", "in", "which", "features", "they", "distribute", "to", "which", "tokens", ",", "how", "nuanced", "their", "distributions", "are", ",", "and", "how", "robust", "the", "encoding", "of", "each", "feature", "is", "to", "distance", ".", "we", "discuss", "implications", "of", "these", "results", "for", "how", "different", "types", "of", "models", "break", "down", "and", "prioritize", "word", "-", "level", "context", "information", "when", "constructing", "token", "embeddings", "."]}, {"venue": "ACL", "title": "Weakly Supervised Word Segmentation for Computational Language Documentation", "abstract": "Word and morpheme segmentation are fundamental steps of language documentation as they allow to discover lexical units in a language for which the lexicon is unknown. 
However, in most language documentation scenarios, linguists do not start from a blank page: they may already have a pre-existing dictionary or have initiated manual segmentation of a small part of their data. This paper studies how such a weak supervision can be taken advantage of in Bayesian non-parametric models of segmentation. Our experiments on two very low resource languages (Mboshi and Japhug), whose documentation is still in progress, show that weak supervision can be beneficial to the segmentation quality. In addition, we investigate an incremental learning scenario where manual segmentations are provided in a sequential manner. This work opens the way for interactive annotation tools for documentary linguists.", "doc_id": "38dd692d5bf8b48220549bfb2805bea8", "publication_year": 2022, "sentences": ["word and morpheme segmentation are fundamental steps of language documentation as they allow to discover lexical units in a language for which the lexicon is unknown .", "however , in most language documentation scenarios , linguists do not start from a blank page : they may already have a pre - existing dictionary or have initiated manual segmentation of a small part of their data .", "this paper studies how such a weak supervision can be taken advantage of in bayesian non - parametric models of segmentation .", "our experiments on two very low resource languages ( mboshi and japhug ) , whose documentation is still in progress , show that weak supervision can be beneficial to the segmentation quality .", "in addition , we investigate an incremental learning scenario where manual segmentations are provided in a sequential manner .", "this work opens the way for interactive annotation tools for documentary linguists ."], "events": [{"event_type": "ITT", "arguments": [], "trigger": {"text": "fundamental steps", "tokens": ["fundamental", "steps"], "offsets": [5, 6]}}, {"event_type": "WKS", "arguments": [{"text": "weak supervision", "nugget_type": "TAK", 
"argument_type": "Content", "tokens": ["weak", "supervision"], "offsets": [72, 73]}, {"text": "in bayesian non - parametric models of segmentation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "bayesian", "non", "-", "parametric", "models", "of", "segmentation"], "offsets": [79, 80, 81, 82, 83, 84, 85, 86]}], "trigger": {"text": "taken advantage", "tokens": ["taken", "advantage"], "offsets": [76, 77]}}, {"event_type": "FIN", "arguments": [{"text": "beneficial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["beneficial"], "offsets": [115]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [109]}}, {"event_type": "FAC", "arguments": [{"text": "weak supervision", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["weak", "supervision"], "offsets": [111, 112]}, {"text": "segmentation quality", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["segmentation", "quality"], "offsets": [118, 119]}], "trigger": {"text": "beneficial", "tokens": ["beneficial"], "offsets": [115]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [124]}, {"text": "where manual segmentations are provided in a sequential manner", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["where", "manual", "segmentations", "are", "provided", "in", "a", "sequential", "manner"], "offsets": [130, 131, 132, 133, 134, 135, 136, 137, 138]}, {"text": "incremental learning scenario", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["incremental", "learning", "scenario"], "offsets": [127, 128, 129]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [125]}}], "document": ["word", "and", "morpheme", "segmentation", "are", "fundamental", "steps", "of", "language", "documentation", "as", "they", "allow", "to", "discover", "lexical", "units", "in", "a", "language", "for", "which", "the", "lexicon", "is", "unknown", 
".", "however", ",", "in", "most", "language", "documentation", "scenarios", ",", "linguists", "do", "not", "start", "from", "a", "blank", "page", ":", "they", "may", "already", "have", "a", "pre", "-", "existing", "dictionary", "or", "have", "initiated", "manual", "segmentation", "of", "a", "small", "part", "of", "their", "data", ".", "this", "paper", "studies", "how", "such", "a", "weak", "supervision", "can", "be", "taken", "advantage", "of", "in", "bayesian", "non", "-", "parametric", "models", "of", "segmentation", ".", "our", "experiments", "on", "two", "very", "low", "resource", "languages", "(", "mboshi", "and", "japhug", ")", ",", "whose", "documentation", "is", "still", "in", "progress", ",", "show", "that", "weak", "supervision", "can", "be", "beneficial", "to", "the", "segmentation", "quality", ".", "in", "addition", ",", "we", "investigate", "an", "incremental", "learning", "scenario", "where", "manual", "segmentations", "are", "provided", "in", "a", "sequential", "manner", ".", "this", "work", "opens", "the", "way", "for", "interactive", "annotation", "tools", "for", "documentary", "linguists", "."]}, {"venue": "ACL", "title": "Give Me Convenience and Give Her Death: Who Should Decide What Uses of NLP are Appropriate, and on What Basis?", "abstract": "As part of growing NLP capabilities, coupled with an awareness of the ethical dimensions of research, questions have been raised about whether particular datasets and tasks should be deemed off-limits for NLP research. We examine this question with respect to a paper on automatic legal sentencing from EMNLP 2019 which was a source of some debate, in asking whether the paper should have been allowed to be published, who should have been charged with making such a decision, and on what basis. 
We focus in particular on the role of data statements in ethically assessing research, but also discuss the topic of dual use, and examine the outcomes of similar debates in other scientific disciplines.", "doc_id": "0174fd4d4bd13e5933b34f7346f22b45", "publication_year": 2020, "sentences": ["as part of growing nlp capabilities , coupled with an awareness of the ethical dimensions of research , questions have been raised about whether particular datasets and tasks should be deemed off - limits for nlp research .", "we examine this question with respect to a paper on automatic legal sentencing from emnlp 2019 which was a source of some debate , in asking whether the paper should have been allowed to be published , who should have been charged with making such a decision , and on what basis .", "we focus in particular on the role of data statements in ethically assessing research , but also discuss the topic of dual use , and examine the outcomes of similar debates in other scientific disciplines ."], "events": [{"event_type": "ITT", "arguments": [{"text": "nlp research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["nlp", "research"], "offsets": [35, 36]}], "trigger": {"text": "deemed", "tokens": ["deemed"], "offsets": [30]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [38]}, {"text": "whether particular datasets and tasks should be deemed off - limits for nlp research", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["whether", "particular", "datasets", "and", "tasks", "should", "be", "deemed", "off", "-", "limits", "for", "nlp", "research"], "offsets": [23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36]}, {"text": "with respect to a paper on automatic legal sentencing from emnlp 2019", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "respect", "to", "a", "paper", "on", "automatic", "legal", "sentencing", "from", "emnlp", 
"2019"], "offsets": [42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "in ethically assessing research", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "ethically", "assessing", "research"], "offsets": [101, 102, 103, 104]}, {"text": "role of data statements", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["role", "of", "data", "statements"], "offsets": [97, 98, 99, 100]}], "trigger": {"text": "focus in particular on", "tokens": ["focus", "in", "particular", "on"], "offsets": [92, 93, 94, 95]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "topic of dual use", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["topic", "of", "dual", "use"], "offsets": [110, 111, 112, 113]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [108]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "in other scientific disciplines", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "other", "scientific", "disciplines"], "offsets": [122, 123, 124, 125]}, {"text": "outcomes of similar debates", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["outcomes", "of", "similar", "debates"], "offsets": [118, 119, 120, 121]}], "trigger": {"text": "examine", "tokens": ["examine"], "offsets": [116]}}], "document": ["as", "part", "of", "growing", "nlp", "capabilities", ",", "coupled", "with", "an", "awareness", "of", "the", "ethical", "dimensions", "of", "research", ",", "questions", "have", "been", "raised", "about", "whether", "particular", "datasets", "and", "tasks", "should", "be", 
"deemed", "off", "-", "limits", "for", "nlp", "research", ".", "we", "examine", "this", "question", "with", "respect", "to", "a", "paper", "on", "automatic", "legal", "sentencing", "from", "emnlp", "2019", "which", "was", "a", "source", "of", "some", "debate", ",", "in", "asking", "whether", "the", "paper", "should", "have", "been", "allowed", "to", "be", "published", ",", "who", "should", "have", "been", "charged", "with", "making", "such", "a", "decision", ",", "and", "on", "what", "basis", ".", "we", "focus", "in", "particular", "on", "the", "role", "of", "data", "statements", "in", "ethically", "assessing", "research", ",", "but", "also", "discuss", "the", "topic", "of", "dual", "use", ",", "and", "examine", "the", "outcomes", "of", "similar", "debates", "in", "other", "scientific", "disciplines", "."]}, {"venue": "ACL", "title": "Unsupervised Information Extraction: Regularizing Discriminative Approaches with Relation Distribution Losses", "abstract": "Unsupervised relation extraction aims at extracting relations between entities in text. Previous unsupervised approaches are either generative or discriminative. In a supervised setting, discriminative approaches, such as deep neural network classifiers, have demonstrated substantial improvement. However, these models are hard to train without supervision, and the currently proposed solutions are unstable. To overcome this limitation, we introduce a skewness loss which encourages the classifier to predict a relation with confidence given a sentence, and a distribution distance loss enforcing that all relations are predicted in average. 
These losses improve the performance of discriminative based models, and enable us to train deep neural networks satisfactorily, surpassing current state of the art on three different datasets.", "doc_id": "8fb81ecb040bc192cdaae50be829d7e6", "publication_year": 2019, "sentences": ["unsupervised relation extraction aims at extracting relations between entities in text .", "previous unsupervised approaches are either generative or discriminative .", "in a supervised setting , discriminative approaches , such as deep neural network classifiers , have demonstrated substantial improvement .", "however , these models are hard to train without supervision , and the currently proposed solutions are unstable .", "to overcome this limitation , we introduce a skewness loss which encourages the classifier to predict a relation with confidence given a sentence , and a distribution distance loss enforcing that all relations are predicted in average .", "these losses improve the performance of discriminative based models , and enable us to train deep neural networks satisfactorily , surpassing current state of the art on three different datasets ."], "events": [{"event_type": "ITT", "arguments": [{"text": "unsupervised relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unsupervised", "relation", "extraction"], "offsets": [0, 1, 2]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [3]}}, {"event_type": "RWF", "arguments": [{"text": "discriminative approaches", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["discriminative", "approaches"], "offsets": [26, 27]}, {"text": "hard", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["hard"], "offsets": [46]}, {"text": "without supervision", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "supervision"], "offsets": [49, 50]}], "trigger": {"text": "train", "tokens": ["train"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [65]}, {"text": "skewness loss", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["skewness", "loss"], "offsets": [68, 69]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [66]}}, {"event_type": "MDS", "arguments": [{"text": "classifier", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["classifier"], "offsets": [73]}, {"text": "predict", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["predict"], "offsets": [75]}], "trigger": {"text": "encourages", "tokens": ["encourages"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "relation with confidence given a sentence", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["relation", "with", "confidence", "given", "a", "sentence"], "offsets": [77, 78, 79, 80, 81, 82]}, {"text": "distribution distance loss", "nugget_type": "MOD", "argument_type": "Aim", "tokens": ["distribution", "distance", "loss"], "offsets": [86, 87, 88]}], "trigger": {"text": "predict", "tokens": ["predict"], "offsets": [75]}}, {"event_type": "CMP", "arguments": [{"text": "improve", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improve"], "offsets": [100]}, {"text": "skewness loss", "nugget_type": "MOD", "argument_type": "Arg1", "tokens": ["skewness", "loss"], "offsets": [68, 69]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [100]}}, {"event_type": "CMP", "arguments": [{"text": "surpassing", "nugget_type": "STR", "argument_type": "Result", "tokens": ["surpassing"], "offsets": [118]}, {"text": "current state of the art", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "of", "the", "art"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "surpassing", "tokens": ["surpassing"], "offsets": [118]}}], "document": ["unsupervised", "relation", "extraction", "aims", "at", "extracting", "relations", "between", 
"entities", "in", "text", ".", "previous", "unsupervised", "approaches", "are", "either", "generative", "or", "discriminative", ".", "in", "a", "supervised", "setting", ",", "discriminative", "approaches", ",", "such", "as", "deep", "neural", "network", "classifiers", ",", "have", "demonstrated", "substantial", "improvement", ".", "however", ",", "these", "models", "are", "hard", "to", "train", "without", "supervision", ",", "and", "the", "currently", "proposed", "solutions", "are", "unstable", ".", "to", "overcome", "this", "limitation", ",", "we", "introduce", "a", "skewness", "loss", "which", "encourages", "the", "classifier", "to", "predict", "a", "relation", "with", "confidence", "given", "a", "sentence", ",", "and", "a", "distribution", "distance", "loss", "enforcing", "that", "all", "relations", "are", "predicted", "in", "average", ".", "these", "losses", "improve", "the", "performance", "of", "discriminative", "based", "models", ",", "and", "enable", "us", "to", "train", "deep", "neural", "networks", "satisfactorily", ",", "surpassing", "current", "state", "of", "the", "art", "on", "three", "different", "datasets", "."]}, {"venue": "ACL", "title": "Learning Efficient Dialogue Policy from Demonstrations through Shaping", "abstract": "Training a task-oriented dialogue agent with reinforcement learning is prohibitively expensive since it requires a large volume of interactions with users. Human demonstrations can be used to accelerate learning progress. However, how to effectively leverage demonstrations to learn dialogue policy remains less explored. In this paper, we present S\u02c62Agent that efficiently learns dialogue policy from demonstrations through policy shaping and reward shaping. We use an imitation model to distill knowledge from demonstrations, based on which policy shaping estimates feedback on how the agent should act in policy space. 
Reward shaping is then incorporated to bonus state-actions similar to demonstrations explicitly in value space encouraging better exploration. The effectiveness of the proposed S\u02c62Agentt is demonstrated in three dialogue domains and a challenging domain adaptation task with both user simulator evaluation and human evaluation.", "doc_id": "2f81c0870746b184aa57aa0aff9e6b2f", "publication_year": 2020, "sentences": ["training a task - oriented dialogue agent with reinforcement learning is prohibitively expensive since it requires a large volume of interactions with users .", "human demonstrations can be used to accelerate learning progress .", "however , how to effectively leverage demonstrations to learn dialogue policy remains less explored .", "in this paper , we present [UNK] that efficiently learns dialogue policy from demonstrations through policy shaping and reward shaping .", "we use an imitation model to distill knowledge from demonstrations , based on which policy shaping estimates feedback on how the agent should act in policy space .", "reward shaping is then incorporated to bonus state - actions similar to demonstrations explicitly in value space encouraging better exploration .", "the effectiveness of the proposed [UNK] is demonstrated in three dialogue domains and a challenging domain adaptation task with both user simulator evaluation and human evaluation ."], "events": [{"event_type": "RWF", "arguments": [{"text": "task - oriented dialogue agent", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["task", "-", "oriented", "dialogue", "agent"], "offsets": [2, 3, 4, 5, 6]}, {"text": "prohibitively", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["prohibitively"], "offsets": [11]}, {"text": "expensive", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["expensive"], "offsets": [12]}, {"text": "reinforcement learning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["reinforcement", "learning"], "offsets": 
[8, 9]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [0]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [53]}, {"text": "[UNK]", "nugget_type": "APP", "argument_type": "Content", "tokens": ["[UNK]"], "offsets": [55]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [54]}}, {"event_type": "MDS", "arguments": [{"text": "dialogue policy", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["dialogue", "policy"], "offsets": [59, 60]}, {"text": "demonstrations", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["demonstrations"], "offsets": [62]}, {"text": "policy shaping", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["policy", "shaping"], "offsets": [64, 65]}, {"text": "reward shaping", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["reward", "shaping"], "offsets": [67, 68]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [70]}, {"text": "imitation model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["imitation", "model"], "offsets": [73, 74]}, {"text": "distill", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["distill"], "offsets": [76]}, {"text": "from demonstrations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "demonstrations"], "offsets": [78, 79]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [71]}}, {"event_type": "PUR", "arguments": [{"text": "knowledge", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["knowledge"], "offsets": [77]}], "trigger": {"text": "distill", "tokens": ["distill"], "offsets": [76]}}, {"event_type": "FAC", "arguments": [{"text": "in three dialogue domains", "nugget_type": "LIM", "argument_type": "Condition", 
"tokens": ["in", "three", "dialogue", "domains"], "offsets": [127, 128, 129, 130]}, {"text": "effectiveness of the proposed [UNK]", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["effectiveness", "of", "the", "proposed", "[UNK]"], "offsets": [120, 121, 122, 123, 124]}], "trigger": {"text": "demonstrated", "tokens": ["demonstrated"], "offsets": [126]}}], "document": ["training", "a", "task", "-", "oriented", "dialogue", "agent", "with", "reinforcement", "learning", "is", "prohibitively", "expensive", "since", "it", "requires", "a", "large", "volume", "of", "interactions", "with", "users", ".", "human", "demonstrations", "can", "be", "used", "to", "accelerate", "learning", "progress", ".", "however", ",", "how", "to", "effectively", "leverage", "demonstrations", "to", "learn", "dialogue", "policy", "remains", "less", "explored", ".", "in", "this", "paper", ",", "we", "present", "[UNK]", "that", "efficiently", "learns", "dialogue", "policy", "from", "demonstrations", "through", "policy", "shaping", "and", "reward", "shaping", ".", "we", "use", "an", "imitation", "model", "to", "distill", "knowledge", "from", "demonstrations", ",", "based", "on", "which", "policy", "shaping", "estimates", "feedback", "on", "how", "the", "agent", "should", "act", "in", "policy", "space", ".", "reward", "shaping", "is", "then", "incorporated", "to", "bonus", "state", "-", "actions", "similar", "to", "demonstrations", "explicitly", "in", "value", "space", "encouraging", "better", "exploration", ".", "the", "effectiveness", "of", "the", "proposed", "[UNK]", "is", "demonstrated", "in", "three", "dialogue", "domains", "and", "a", "challenging", "domain", "adaptation", "task", "with", "both", "user", "simulator", "evaluation", "and", "human", "evaluation", "."]}, {"venue": "ACL", "title": "Does Recommend-Revise Produce Reliable Annotations? An Analysis on Missing Instances in DocRED", "abstract": "DocRED is a widely used dataset for document-level relation extraction. 
In the large-scale annotation, a recommend-revise scheme is adopted to reduce the workload. Within this scheme, annotators are provided with candidate relation instances from distant supervision, and they then manually supplement and remove relational facts based on the recommendations. However, when comparing DocRED with a subset relabeled from scratch, we find that this scheme results in a considerable amount of false negative samples and an obvious bias towards popular entities and relations. Furthermore, we observe that the models trained on DocRED have low recall on our relabeled dataset and inherit the same bias in the training data. Through the analysis of annotators\u2019 behaviors, we figure out the underlying reason for the problems above: the scheme actually discourages annotators from supplementing adequate instances in the revision phase. We appeal to future research to take into consideration the issues with the recommend-revise scheme when designing new models and annotation schemes. 
The relabeled dataset is released at https://github.com/AndrewZhe/Revisit-DocRED, to serve as a more reliable test set of document RE models.", "doc_id": "7ddfbd5ccc43ce0c521174051cc0570f", "publication_year": 2022, "sentences": ["docred is a widely used dataset for document - level relation extraction .", "in the large - scale annotation , a recommend - revise scheme is adopted to reduce the workload .", "within this scheme , annotators are provided with candidate relation instances from distant supervision , and they then manually supplement and remove relational facts based on the recommendations .", "however , when comparing docred with a subset relabeled from scratch , we find that this scheme results in a considerable amount of false negative samples and an obvious bias towards popular entities and relations .", "furthermore , we observe that the models trained on docred have low recall on our relabeled dataset and inherit the same bias in the training data .", "through the analysis of annotators \u2019 behaviors , we figure out the underlying reason for the problems above : the scheme actually discourages annotators from supplementing adequate instances in the revision phase .", "we appeal to future research to take into consideration the issues with the recommend - revise scheme when designing new models and annotation schemes .", "the relabeled dataset is released at https : / / github . 
com / andrewzhe / revisit - docred , to serve as a more reliable test set of document re models ."], "events": [{"event_type": "ITT", "arguments": [{"text": "document - level relation extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["document", "-", "level", "relation", "extraction"], "offsets": [7, 8, 9, 10, 11]}], "trigger": {"text": "dataset", "tokens": ["dataset"], "offsets": [5]}}, {"event_type": "RWS", "arguments": [{"text": "candidate relation instances", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["candidate", "relation", "instances"], "offsets": [40, 41, 42]}, {"text": "relational facts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["relational", "facts"], "offsets": [54, 55]}], "trigger": {"text": "supplement and remove", "tokens": ["supplement", "and", "remove"], "offsets": [51, 52, 53]}}, {"event_type": "RWF", "arguments": [{"text": "recommend - revise scheme", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["recommend", "-", "revise", "scheme"], "offsets": [21, 22, 23, 24]}, {"text": "false negative samples", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["false", "negative", "samples"], "offsets": [84, 85, 86]}, {"text": "obvious bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["obvious", "bias"], "offsets": [89, 90]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [78]}}, {"event_type": "RWF", "arguments": [{"text": "models trained on docred", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["models", "trained", "on", "docred"], "offsets": [103, 104, 105, 106]}, {"text": "low recall", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["low", "recall"], "offsets": [108, 109]}, {"text": "relabeled dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["relabeled", "dataset"], "offsets": [112, 113]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [107]}}, {"event_type": "RWF", 
"arguments": [{"text": "same bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["same", "bias"], "offsets": [117, 118]}, {"text": "in the training data", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "training", "data"], "offsets": [119, 120, 121, 122]}], "trigger": {"text": "inherit", "tokens": ["inherit"], "offsets": [115]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [132]}, {"text": "underlying reason", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["underlying", "reason"], "offsets": [136, 137]}, {"text": "through the analysis of annotators \u2019 behaviors", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "the", "analysis", "of", "annotators", "\u2019", "behaviors"], "offsets": [124, 125, 126, 127, 128, 129, 130]}], "trigger": {"text": "figure out", "tokens": ["figure", "out"], "offsets": [133, 134]}}, {"event_type": "FAC", "arguments": [{"text": "recommend - revise scheme", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["recommend", "-", "revise", "scheme"], "offsets": [21, 22, 23, 24]}, {"text": "adequate instances", "nugget_type": "APP", "argument_type": "Object", "tokens": ["adequate", "instances"], "offsets": [150, 151]}, {"text": "in the revision phase", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "revision", "phase"], "offsets": [152, 153, 154, 155]}], "trigger": {"text": "discourages", "tokens": ["discourages"], "offsets": [146]}}, {"event_type": "WKS", "arguments": [{"text": "relabeled dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["relabeled", "dataset"], "offsets": [183, 184]}, {"text": "document re models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["document", "re", "models"], "offsets": [211, 212, 213]}], "trigger": {"text": "released", "tokens": ["released"], "offsets": [186]}}], 
"document": ["docred", "is", "a", "widely", "used", "dataset", "for", "document", "-", "level", "relation", "extraction", ".", "in", "the", "large", "-", "scale", "annotation", ",", "a", "recommend", "-", "revise", "scheme", "is", "adopted", "to", "reduce", "the", "workload", ".", "within", "this", "scheme", ",", "annotators", "are", "provided", "with", "candidate", "relation", "instances", "from", "distant", "supervision", ",", "and", "they", "then", "manually", "supplement", "and", "remove", "relational", "facts", "based", "on", "the", "recommendations", ".", "however", ",", "when", "comparing", "docred", "with", "a", "subset", "relabeled", "from", "scratch", ",", "we", "find", "that", "this", "scheme", "results", "in", "a", "considerable", "amount", "of", "false", "negative", "samples", "and", "an", "obvious", "bias", "towards", "popular", "entities", "and", "relations", ".", "furthermore", ",", "we", "observe", "that", "the", "models", "trained", "on", "docred", "have", "low", "recall", "on", "our", "relabeled", "dataset", "and", "inherit", "the", "same", "bias", "in", "the", "training", "data", ".", "through", "the", "analysis", "of", "annotators", "\u2019", "behaviors", ",", "we", "figure", "out", "the", "underlying", "reason", "for", "the", "problems", "above", ":", "the", "scheme", "actually", "discourages", "annotators", "from", "supplementing", "adequate", "instances", "in", "the", "revision", "phase", ".", "we", "appeal", "to", "future", "research", "to", "take", "into", "consideration", "the", "issues", "with", "the", "recommend", "-", "revise", "scheme", "when", "designing", "new", "models", "and", "annotation", "schemes", ".", "the", "relabeled", "dataset", "is", "released", "at", "https", ":", "/", "/", "github", ".", "com", "/", "andrewzhe", "/", "revisit", "-", "docred", ",", "to", "serve", "as", "a", "more", "reliable", "test", "set", "of", "document", "re", "models", "."]}, {"venue": "ACL", "title": "Beyond Goldfish Memory: Long-Term Open-Domain 
Conversation", "abstract": "Despite recent improvements in open-domain dialogue models, state of the art models are trained and evaluated on short conversations with little context. In contrast, the long-term conversation setting has hardly been studied. In this work we collect and release a human-human dataset consisting of multiple chat sessions whereby the speaking partners learn about each other\u2019s interests and discuss the things they have learnt from past sessions. We show how existing models trained on existing datasets perform poorly in this long-term conversation setting in both automatic and human evaluations, and we study long-context models that can perform much better. In particular, we find retrieval-augmented methods and methods with an ability to summarize and recall previous conversations outperform the standard encoder-decoder architectures currently considered state of the art.", "doc_id": "eae4b3a4853c93ef08bfbcb33ae07ab3", "publication_year": 2022, "sentences": ["despite recent improvements in open - domain dialogue models , state of the art models are trained and evaluated on short conversations with little context .", "in contrast , the long - term conversation setting has hardly been studied .", "in this work we collect and release a human - human dataset consisting of multiple chat sessions whereby the speaking partners learn about each other \u2019 s interests and discuss the things they have learnt from past sessions .", "we show how existing models trained on existing datasets perform poorly in this long - term conversation setting in both automatic and human evaluations , and we study long - context models that can perform much better .", "in particular , we find retrieval - augmented methods and methods with an ability to summarize and recall previous conversations outperform the standard encoder - decoder architectures currently considered state of the art ."], "events": [{"event_type": "ITT", "arguments": [{"text": "open - domain 
dialogue models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["open", "-", "domain", "dialogue", "models"], "offsets": [4, 5, 6, 7, 8]}], "trigger": {"text": "trained", "tokens": ["trained"], "offsets": [16]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [43]}, {"text": "human - human dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["human", "-", "human", "dataset"], "offsets": [48, 49, 50, 51]}, {"text": "of multiple chat sessions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["of", "multiple", "chat", "sessions"], "offsets": [53, 54, 55, 56]}], "trigger": {"text": "collect and release", "tokens": ["collect", "and", "release"], "offsets": [44, 45, 46]}}, {"event_type": "RWF", "arguments": [{"text": "poorly", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["poorly"], "offsets": [89]}, {"text": "long - term conversation setting", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["long", "-", "term", "conversation", "setting"], "offsets": [92, 93, 94, 95, 96]}, {"text": "in both automatic and human evaluations", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "both", "automatic", "and", "human", "evaluations"], "offsets": [97, 98, 99, 100, 101, 102]}, {"text": "existing models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "models"], "offsets": [82, 83]}], "trigger": {"text": "perform", "tokens": ["perform"], "offsets": [88]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [105]}, {"text": "long - context models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["long", "-", "context", "models"], "offsets": [107, 108, 109, 110]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [106]}}, {"event_type": "FIN", "arguments": [{"text": "we", 
"nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [120]}, {"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperform"], "offsets": [137]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [121]}}, {"event_type": "CMP", "arguments": [{"text": "retrieval - augmented methods", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["retrieval", "-", "augmented", "methods"], "offsets": [122, 123, 124, 125]}, {"text": "methods with an ability to summarize and recall previous conversations", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["methods", "with", "an", "ability", "to", "summarize", "and", "recall", "previous", "conversations"], "offsets": [127, 128, 129, 130, 131, 132, 133, 134, 135, 136]}, {"text": "standard encoder - decoder architectures", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["standard", "encoder", "-", "decoder", "architectures"], "offsets": [139, 140, 141, 142, 143]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [137]}}], "document": ["despite", "recent", "improvements", "in", "open", "-", "domain", "dialogue", "models", ",", "state", "of", "the", "art", "models", "are", "trained", "and", "evaluated", "on", "short", "conversations", "with", "little", "context", ".", "in", "contrast", ",", "the", "long", "-", "term", "conversation", "setting", "has", "hardly", "been", "studied", ".", "in", "this", "work", "we", "collect", "and", "release", "a", "human", "-", "human", "dataset", "consisting", "of", "multiple", "chat", "sessions", "whereby", "the", "speaking", "partners", "learn", "about", "each", "other", "\u2019", "s", "interests", "and", "discuss", "the", "things", "they", "have", "learnt", "from", "past", "sessions", ".", "we", "show", "how", "existing", "models", "trained", "on", "existing", "datasets", "perform", "poorly", "in", "this", "long", "-", "term", "conversation", "setting", "in", "both", "automatic", "and", 
"human", "evaluations", ",", "and", "we", "study", "long", "-", "context", "models", "that", "can", "perform", "much", "better", ".", "in", "particular", ",", "we", "find", "retrieval", "-", "augmented", "methods", "and", "methods", "with", "an", "ability", "to", "summarize", "and", "recall", "previous", "conversations", "outperform", "the", "standard", "encoder", "-", "decoder", "architectures", "currently", "considered", "state", "of", "the", "art", "."]}, {"venue": "ACL", "title": "Interpretable Neural Predictions with Differentiable Binary Variables", "abstract": "The success of neural networks comes hand in hand with a desire for more interpretability. We focus on text classifiers and make them more interpretable by having them provide a justification\u2013a rationale\u2013for their predictions. We approach this problem by jointly training two neural network models: a latent model that selects a rationale (i.e. a short and informative part of the input text), and a classifier that learns from the words in the rationale alone. Previous work proposed to assign binary latent masks to input positions and to promote short selections via sparsity-inducing penalties such as L0 regularisation. We propose a latent model that mixes discrete and continuous behaviour allowing at the same time for binary selections and gradient-based training without REINFORCE. In our formulation, we can tractably compute the expected value of penalties such as L0, which allows us to directly optimise the model towards a pre-specified text selection rate. 
We show that our approach is competitive with previous work on rationale extraction, and explore further uses in attention mechanisms.", "doc_id": "1b9980041fb9b06bca6c83ad2936c3d1", "publication_year": 2019, "sentences": ["the success of neural networks comes hand in hand with a desire for more interpretability .", "we focus on text classifiers and make them more interpretable by having them provide a justification \u2013 a rationale \u2013 for their predictions .", "we approach this problem by jointly training two neural network models : a latent model that selects a rationale ( i . e . a short and informative part of the input text ) , and a classifier that learns from the words in the rationale alone .", "previous work proposed to assign binary latent masks to input positions and to promote short selections via sparsity - inducing penalties such as l0 regularisation .", "we propose a latent model that mixes discrete and continuous behaviour allowing at the same time for binary selections and gradient - based training without reinforce .", "in our formulation , we can tractably compute the expected value of penalties such as l0 , which allows us to directly optimise the model towards a pre - specified text selection rate .", "we show that our approach is competitive with previous work on rationale extraction , and explore further uses in attention mechanisms ."], "events": [{"event_type": "ITT", "arguments": [{"text": "neural networks", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["neural", "networks"], "offsets": [3, 4]}, {"text": "interpretability", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["interpretability"], "offsets": [14]}], "trigger": {"text": "desire", "tokens": ["desire"], "offsets": [11]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [16]}, {"text": "text classifiers", "nugget_type": "APP", "argument_type": "Content", "tokens": 
["text", "classifiers"], "offsets": [19, 20]}], "trigger": {"text": "focus", "tokens": ["focus"], "offsets": [17]}}, {"event_type": "MDS", "arguments": [{"text": "justification", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["justification"], "offsets": [31]}, {"text": "make them more interpretable", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["make", "more", "interpretable"], "offsets": [22, 24, 25]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [29]}}, {"event_type": "PUR", "arguments": [{"text": "text classifiers", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["text", "classifiers"], "offsets": [19, 20]}], "trigger": {"text": "make them more interpretable", "tokens": ["make", "them", "more", "interpretable"], "offsets": [22, 23, 24, 25]}}, {"event_type": "MDS", "arguments": [{"text": "latent model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["latent", "model"], "offsets": [53, 54]}, {"text": "classifier", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["classifier"], "offsets": [77]}], "trigger": {"text": "jointly training", "tokens": ["jointly", "training"], "offsets": [45, 46]}}, {"event_type": "MDS", "arguments": [{"text": "justification", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["justification"], "offsets": [31]}], "trigger": {"text": "selects", "tokens": ["selects"], "offsets": [56]}}, {"event_type": "MDS", "arguments": [{"text": "from the words in the rationale alone", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["from", "the", "words", "in", "the", "justification", "alone"], "offsets": [80, 81, 82, 83, 84, 31, 86]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [79]}}, {"event_type": "RWS", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "work"], "offsets": [88, 89]}, {"text": "binary latent masks", "nugget_type": 
"FEA", "argument_type": "BaseComponent", "tokens": ["binary", "latent", "masks"], "offsets": [93, 94, 95]}, {"text": "input", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["input"], "offsets": [97]}], "trigger": {"text": "assign", "tokens": ["assign"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "positions", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["positions"], "offsets": [98]}], "trigger": {"text": "input", "tokens": ["input"], "offsets": [97]}}, {"event_type": "RWS", "arguments": [{"text": "previous work", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["previous", "work"], "offsets": [88, 89]}, {"text": "sparsity - inducing penalties", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["sparsity", "-", "inducing", "penalties"], "offsets": [105, 106, 107, 108]}, {"text": "short selections", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["short", "selections"], "offsets": [102, 103]}], "trigger": {"text": "promote", "tokens": ["promote"], "offsets": [101]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [114]}, {"text": "latent model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["latent", "model"], "offsets": [117, 118]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [115]}}, {"event_type": "MDS", "arguments": [{"text": "binary selections", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["binary", "selections"], "offsets": [131, 132]}, {"text": "gradient - based training", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["gradient", "-", "based", "training"], "offsets": [134, 135, 136, 137]}, {"text": "discrete behaviour", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["discrete", "behaviour"], "offsets": [121, 124]}, {"text": "continuous behaviour", "nugget_type": "FEA", "argument_type": 
"BaseComponent", "tokens": ["continuous", "behaviour"], "offsets": [123, 124]}], "trigger": {"text": "mixes", "tokens": ["mixes"], "offsets": [120]}}, {"event_type": "FIN", "arguments": [{"text": "competitive", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["competitive"], "offsets": [181]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [176]}}, {"event_type": "CMP", "arguments": [{"text": "latent model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["latent", "model"], "offsets": [117, 118]}, {"text": "competitive", "nugget_type": "STR", "argument_type": "Result", "tokens": ["competitive"], "offsets": [181]}, {"text": "previous work", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", "work"], "offsets": [183, 184]}], "trigger": {"text": "competitive", "tokens": ["competitive"], "offsets": [181]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [175]}, {"text": "further uses", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["further", "uses"], "offsets": [191, 192]}, {"text": "in attention mechanisms", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "attention", "mechanisms"], "offsets": [193, 194, 195]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [190]}}], "document": ["the", "success", "of", "neural", "networks", "comes", "hand", "in", "hand", "with", "a", "desire", "for", "more", "interpretability", ".", "we", "focus", "on", "text", "classifiers", "and", "make", "them", "more", "interpretable", "by", "having", "them", "provide", "a", "justification", "\u2013", "a", "rationale", "\u2013", "for", "their", "predictions", ".", "we", "approach", "this", "problem", "by", "jointly", "training", "two", "neural", "network", "models", ":", "a", "latent", "model", "that", "selects", "a", "rationale", "(", "i", ".", "e", ".", "a", "short", "and", "informative", "part", 
"of", "the", "input", "text", ")", ",", "and", "a", "classifier", "that", "learns", "from", "the", "words", "in", "the", "rationale", "alone", ".", "previous", "work", "proposed", "to", "assign", "binary", "latent", "masks", "to", "input", "positions", "and", "to", "promote", "short", "selections", "via", "sparsity", "-", "inducing", "penalties", "such", "as", "l0", "regularisation", ".", "we", "propose", "a", "latent", "model", "that", "mixes", "discrete", "and", "continuous", "behaviour", "allowing", "at", "the", "same", "time", "for", "binary", "selections", "and", "gradient", "-", "based", "training", "without", "reinforce", ".", "in", "our", "formulation", ",", "we", "can", "tractably", "compute", "the", "expected", "value", "of", "penalties", "such", "as", "l0", ",", "which", "allows", "us", "to", "directly", "optimise", "the", "model", "towards", "a", "pre", "-", "specified", "text", "selection", "rate", ".", "we", "show", "that", "our", "approach", "is", "competitive", "with", "previous", "work", "on", "rationale", "extraction", ",", "and", "explore", "further", "uses", "in", "attention", "mechanisms", "."]}, {"venue": "ACL", "title": "Knowledgeable Prompt-tuning: Incorporating Knowledge into Prompt Verbalizer for Text Classification", "abstract": "Tuning pre-trained language models (PLMs) with task-specific prompts has been a promising approach for text classification. Particularly, previous studies suggest that prompt-tuning has remarkable superiority in the low-data scenario over the generic fine-tuning methods with extra classifiers. The core idea of prompt-tuning is to insert text pieces, i.e., template, to the input and transform a classification problem into a masked language modeling problem, where a crucial step is to construct a projection, i.e., verbalizer, between a label space and a label word space. 
A verbalizer is usually handcrafted or searched by gradient descent, which may lack coverage and bring considerable bias and high variances to the results. In this work, we focus on incorporating external knowledge into the verbalizer, forming a knowledgeable prompttuning (KPT), to improve and stabilize prompttuning. Specifically, we expand the label word space of the verbalizer using external knowledge bases (KBs) and refine the expanded label word space with the PLM itself before predicting with the expanded label word space. Extensive experiments on zero and few-shot text classification tasks demonstrate the effectiveness of knowledgeable prompt-tuning.", "doc_id": "2f33c21da0d3a8068aa96400985d35dc", "publication_year": 2022, "sentences": ["tuning pre - trained language models ( plms ) with task - specific prompts has been a promising approach for text classification .", "particularly , previous studies suggest that prompt - tuning has remarkable superiority in the low - data scenario over the generic fine - tuning methods with extra classifiers .", "the core idea of prompt - tuning is to insert text pieces , i . e . , template , to the input and transform a classification problem into a masked language modeling problem , where a crucial step is to construct a projection , i . e . 
, verbalizer , between a label space and a label word space .", "a verbalizer is usually handcrafted or searched by gradient descent , which may lack coverage and bring considerable bias and high variances to the results .", "in this work , we focus on incorporating external knowledge into the verbalizer , forming a knowledgeable prompttuning ( kpt ) , to improve and stabilize prompttuning .", "specifically , we expand the label word space of the verbalizer using external knowledge bases ( kbs ) and refine the expanded label word space with the plm itself before predicting with the expanded label word space .", "extensive experiments on zero and few - shot text classification tasks demonstrate the effectiveness of knowledgeable prompt - tuning ."], "events": [{"event_type": "ITT", "arguments": [{"text": "tuning pre - trained language models", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["tuning", "pre", "-", "trained", "language", "models"], "offsets": [0, 1, 2, 3, 4, 5]}, {"text": "with task - specific prompts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "task", "-", "specific", "prompts"], "offsets": [9, 10, 11, 12, 13]}], "trigger": {"text": "promising approach", "tokens": ["promising", "approach"], "offsets": [17, 18]}}, {"event_type": "RWS", "arguments": [{"text": "input", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["input"], "offsets": [74]}, {"text": "template", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["template"], "offsets": [70]}], "trigger": {"text": "insert", "tokens": ["insert"], "offsets": [61]}}, {"event_type": "RWS", "arguments": [{"text": "classification problem", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["classification", "problem"], "offsets": [78, 79]}, {"text": "masked language modeling problem", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["masked", "language", "modeling", "problem"], "offsets": [82, 83, 
84, 85]}], "trigger": {"text": "transform", "tokens": ["transform"], "offsets": [76]}}, {"event_type": "RWS", "arguments": [{"text": "between a label space and a label word space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "a", "label", "space", "and", "a", "label", "word", "space"], "offsets": [104, 105, 106, 107, 108, 109, 110, 111, 112]}, {"text": "verbalizer", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["verbalizer"], "offsets": [102]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [93]}}, {"event_type": "RWF", "arguments": [{"text": "verbalizer", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["verbalizer"], "offsets": [115]}, {"text": "lack", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack"], "offsets": [127]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [127]}}, {"event_type": "RWF", "arguments": [{"text": "verbalizer", "nugget_type": "MOD", "argument_type": "Concern", "tokens": ["verbalizer"], "offsets": [115]}, {"text": "considerable bias", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["considerable", "bias"], "offsets": [131, 132]}, {"text": "high variances", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["high", "variances"], "offsets": [134, 135]}, {"text": "results", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["results"], "offsets": [138]}], "trigger": {"text": "bring", "tokens": ["bring"], "offsets": [130]}}, {"event_type": "MDS", "arguments": [{"text": "external knowledge", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["external", "knowledge"], "offsets": [148, 149]}, {"text": "verbalizer", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["verbalizer"], "offsets": [152]}], "trigger": {"text": "incorporating", "tokens": ["incorporating"], "offsets": [147]}}, {"event_type": "PRP", "arguments": [{"text": "knowledgeable prompttuning", "nugget_type": 
"APP", "argument_type": "Content", "tokens": ["knowledgeable", "prompttuning"], "offsets": [156, 157]}, {"text": "improve and stabilize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve", "and", "stabilize"], "offsets": [163, 164, 165]}], "trigger": {"text": "forming", "tokens": ["forming"], "offsets": [154]}}, {"event_type": "PUR", "arguments": [{"text": "prompttuning", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["prompttuning"], "offsets": [166]}], "trigger": {"text": "improve and stabilize", "tokens": ["improve", "and", "stabilize"], "offsets": [163, 164, 165]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [170]}, {"text": "label word space of the verbalizer", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["label", "word", "space", "of", "the", "verbalizer"], "offsets": [173, 174, 175, 176, 177, 178]}, {"text": "using external knowledge bases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "external", "knowledge", "bases"], "offsets": [179, 180, 181, 182]}], "trigger": {"text": "expand", "tokens": ["expand"], "offsets": [171]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [170]}, {"text": "expanded label word space", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["expanded", "label", "word", "space"], "offsets": [189, 190, 191, 192]}, {"text": "with the plm itself before predicting with the expanded label word space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "the", "pre", "-", "trained", "language", "models", "itself", "before", "predicting", "with", "the", "expanded", "label", "word", "space"], "offsets": [193, 194, 1, 2, 3, 4, 5, 196, 197, 198, 199, 200, 201, 202, 203, 204]}], "trigger": {"text": "refine", "tokens": ["refine"], "offsets": [187]}}, {"event_type": "FAC", 
"arguments": [{"text": "effectiveness of knowledgeable prompt - tuning", "nugget_type": "STR", "argument_type": "Object", "tokens": ["effectiveness", "of", "knowledgeable", "prompt", "-", "tuning"], "offsets": [219, 220, 221, 222, 223, 224]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [217]}}], "document": ["tuning", "pre", "-", "trained", "language", "models", "(", "plms", ")", "with", "task", "-", "specific", "prompts", "has", "been", "a", "promising", "approach", "for", "text", "classification", ".", "particularly", ",", "previous", "studies", "suggest", "that", "prompt", "-", "tuning", "has", "remarkable", "superiority", "in", "the", "low", "-", "data", "scenario", "over", "the", "generic", "fine", "-", "tuning", "methods", "with", "extra", "classifiers", ".", "the", "core", "idea", "of", "prompt", "-", "tuning", "is", "to", "insert", "text", "pieces", ",", "i", ".", "e", ".", ",", "template", ",", "to", "the", "input", "and", "transform", "a", "classification", "problem", "into", "a", "masked", "language", "modeling", "problem", ",", "where", "a", "crucial", "step", "is", "to", "construct", "a", "projection", ",", "i", ".", "e", ".", ",", "verbalizer", ",", "between", "a", "label", "space", "and", "a", "label", "word", "space", ".", "a", "verbalizer", "is", "usually", "handcrafted", "or", "searched", "by", "gradient", "descent", ",", "which", "may", "lack", "coverage", "and", "bring", "considerable", "bias", "and", "high", "variances", "to", "the", "results", ".", "in", "this", "work", ",", "we", "focus", "on", "incorporating", "external", "knowledge", "into", "the", "verbalizer", ",", "forming", "a", "knowledgeable", "prompttuning", "(", "kpt", ")", ",", "to", "improve", "and", "stabilize", "prompttuning", ".", "specifically", ",", "we", "expand", "the", "label", "word", "space", "of", "the", "verbalizer", "using", "external", "knowledge", "bases", "(", "kbs", ")", "and", "refine", "the", "expanded", "label", "word", "space", 
"with", "the", "plm", "itself", "before", "predicting", "with", "the", "expanded", "label", "word", "space", ".", "extensive", "experiments", "on", "zero", "and", "few", "-", "shot", "text", "classification", "tasks", "demonstrate", "the", "effectiveness", "of", "knowledgeable", "prompt", "-", "tuning", "."]}, {"venue": "ACL", "title": "Expanding Pretrained Models to Thousands More Languages via Lexicon-based Adaptation", "abstract": "The performance of multilingual pretrained models is highly dependent on the availability of monolingual or parallel text present in a target language. Thus, the majority of the world\u2019s languages cannot benefit from recent progress in NLP as they have no or limited textual data. To expand possibilities of using NLP technology in these under-represented languages, we systematically study strategies that relax the reliance on conventional language resources through the use of bilingual lexicons, an alternative resource with much better language coverage. We analyze different strategies to synthesize textual or labeled data using lexicons, and how this data can be combined with monolingual or parallel text when available. For 19 under-represented languages across 3 tasks, our methods lead to consistent improvements of up to 5 and 15 points with and without extra monolingual text respectively. 
Overall, our study highlights how NLP methods can be adapted to thousands more languages that are under-served by current technology.", "doc_id": "a7bed2dd836a0fe8c8ae997830b2ea1f", "publication_year": 2022, "sentences": ["the performance of multilingual pretrained models is highly dependent on the availability of monolingual or parallel text present in a target language .", "thus , the majority of the world \u2019 s languages cannot benefit from recent progress in nlp as they have no or limited textual data .", "to expand possibilities of using nlp technology in these under - represented languages , we systematically study strategies that relax the reliance on conventional language resources through the use of bilingual lexicons , an alternative resource with much better language coverage .", "we analyze different strategies to synthesize textual or labeled data using lexicons , and how this data can be combined with monolingual or parallel text when available .", "for 19 under - represented languages across 3 tasks , our methods lead to consistent improvements of up to 5 and 15 points with and without extra monolingual text respectively .", "overall , our study highlights how nlp methods can be adapted to thousands more languages that are under - served by current technology ."], "events": [{"event_type": "RWF", "arguments": [{"text": "highly dependent", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["highly", "dependent"], "offsets": [7, 8]}, {"text": "availability of monolingual or parallel text", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["availability", "of", "monolingual", "or", "parallel", "text"], "offsets": [11, 12, 13, 14, 15, 16]}], "trigger": {"text": "highly dependent", "tokens": ["highly", "dependent"], "offsets": [7, 8]}}, {"event_type": "RWF", "arguments": [{"text": "majority of the world \u2019 s languages", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["majority", "of", "the", "world", "\u2019", "s", 
"languages"], "offsets": [26, 27, 28, 29, 30, 31, 32]}, {"text": "no or limited textual data", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["no", "or", "limited", "textual", "data"], "offsets": [43, 44, 45, 46, 47]}], "trigger": {"text": "have", "tokens": ["have"], "offsets": [42]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [63]}, {"text": "strategies", "nugget_type": "APP", "argument_type": "Content", "tokens": ["strategies"], "offsets": [66]}, {"text": "expand", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["expand"], "offsets": [50]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "possibilities of using nlp technology", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["possibilities", "of", "using", "nlp", "technology"], "offsets": [51, 52, 53, 54, 55]}, {"text": "in these under - represented languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "these", "under", "-", "represented", "languages"], "offsets": [56, 57, 58, 59, 60, 61]}], "trigger": {"text": "expand", "tokens": ["expand"], "offsets": [50]}}, {"event_type": "MDS", "arguments": [{"text": "bilingual lexicons", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["bilingual", "lexicons"], "offsets": [79, 80]}, {"text": "conventional language resources", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["conventional", "language", "resources"], "offsets": [72, 73, 74]}, {"text": "relax", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["relax"], "offsets": [68]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [77]}}, {"event_type": "PUR", "arguments": [{"text": "reliance", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["reliance"], "offsets": [70]}], "trigger": {"text": "relax", "tokens": ["relax"], "offsets": [68]}}, 
{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [91]}, {"text": "different strategies", "nugget_type": "APP", "argument_type": "Content", "tokens": ["different", "strategies"], "offsets": [93, 94]}, {"text": "synthesize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["synthesize"], "offsets": [96]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "textual", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["textual"], "offsets": [97]}, {"text": "labeled data", "nugget_type": "DST", "argument_type": "Aim", "tokens": ["labeled", "data"], "offsets": [99, 100]}, {"text": "using lexicons", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "lexicons"], "offsets": [101, 102]}], "trigger": {"text": "synthesize", "tokens": ["synthesize"], "offsets": [96]}}, {"event_type": "MDS", "arguments": [{"text": "monolingual or parallel text", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["monolingual", "or", "parallel", "text"], "offsets": [112, 113, 114, 115]}, {"text": "this data", "nugget_type": "DST", "argument_type": "TriedComponent", "tokens": ["this", "data"], "offsets": [106, 107]}, {"text": "when available", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "available"], "offsets": [116, 117]}], "trigger": {"text": "combined", "tokens": ["combined"], "offsets": [110]}}, {"event_type": "FAC", "arguments": [{"text": "19 under - represented languages", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["19", "under", "-", "represented", "languages"], "offsets": [120, 121, 122, 123, 124]}, {"text": "across 3 tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["across", "3", "tasks"], "offsets": [125, 126, 127]}, {"text": "consistent improvements", "nugget_type": "STR", "argument_type": "Subject", "tokens": 
["consistent", "improvements"], "offsets": [133, 134]}, {"text": "5 and 15 points", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["5", "and", "15", "points"], "offsets": [138, 139, 140, 141]}, {"text": "with and without extra monolingual text respectively", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "and", "without", "extra", "monolingual", "text", "respectively"], "offsets": [142, 143, 144, 145, 146, 147, 148]}], "trigger": {"text": "lead", "tokens": ["lead"], "offsets": [131]}}, {"event_type": "FAC", "arguments": [{"text": "nlp methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["nlp", "methods"], "offsets": [156, 157]}, {"text": "thousands more languages", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["thousands", "more", "languages"], "offsets": [162, 163, 164]}], "trigger": {"text": "adapted", "tokens": ["adapted"], "offsets": [160]}}], "document": ["the", "performance", "of", "multilingual", "pretrained", "models", "is", "highly", "dependent", "on", "the", "availability", "of", "monolingual", "or", "parallel", "text", "present", "in", "a", "target", "language", ".", "thus", ",", "the", "majority", "of", "the", "world", "\u2019", "s", "languages", "cannot", "benefit", "from", "recent", "progress", "in", "nlp", "as", "they", "have", "no", "or", "limited", "textual", "data", ".", "to", "expand", "possibilities", "of", "using", "nlp", "technology", "in", "these", "under", "-", "represented", "languages", ",", "we", "systematically", "study", "strategies", "that", "relax", "the", "reliance", "on", "conventional", "language", "resources", "through", "the", "use", "of", "bilingual", "lexicons", ",", "an", "alternative", "resource", "with", "much", "better", "language", "coverage", ".", "we", "analyze", "different", "strategies", "to", "synthesize", "textual", "or", "labeled", "data", "using", "lexicons", ",", "and", "how", "this", "data", "can", "be", "combined", "with", "monolingual", "or", 
"parallel", "text", "when", "available", ".", "for", "19", "under", "-", "represented", "languages", "across", "3", "tasks", ",", "our", "methods", "lead", "to", "consistent", "improvements", "of", "up", "to", "5", "and", "15", "points", "with", "and", "without", "extra", "monolingual", "text", "respectively", ".", "overall", ",", "our", "study", "highlights", "how", "nlp", "methods", "can", "be", "adapted", "to", "thousands", "more", "languages", "that", "are", "under", "-", "served", "by", "current", "technology", "."]}, {"venue": "ACL", "title": "Entailment Graph Learning with Textual Entailment and Soft Transitivity", "abstract": "Typed entailment graphs try to learn the entailment relations between predicates from text and model them as edges between predicate nodes. The construction of entailment graphs usually suffers from severe sparsity and unreliability of distributional similarity. We propose a two-stage method, Entailment Graph with Textual Entailment and Transitivity (EGT2). EGT2 learns the local entailment relations by recognizing the textual entailment between template sentences formed by typed CCG-parsed predicates. Based on the generated local graph, EGT2 then uses three novel soft transitivity constraints to consider the logical transitivity in entailment structures. 
Experiments on benchmark datasets show that EGT2 can well model the transitivity in entailment graph to alleviate the sparsity, and leads to signifcant improvement over current state-of-the-art methods.", "doc_id": "2f2f76e2a5f0d503e0b1c0ff7ff025aa", "publication_year": 2022, "sentences": ["typed entailment graphs try to learn the entailment relations between predicates from text and model them as edges between predicate nodes .", "the construction of entailment graphs usually suffers from severe sparsity and unreliability of distributional similarity .", "we propose a two - stage method , entailment graph with textual entailment and transitivity ( egt2 ) .", "egt2 learns the local entailment relations by recognizing the textual entailment between template sentences formed by typed ccg - parsed predicates .", "based on the generated local graph , egt2 then uses three novel soft transitivity constraints to consider the logical transitivity in entailment structures .", "experiments on benchmark datasets show that egt2 can well model the transitivity in entailment graph to alleviate the sparsity , and leads to signifcant improvement over current state - of - the - art methods ."], "events": [{"event_type": "ITT", "arguments": [{"text": "typed entailment graphs", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["typed", "entailment", "graphs"], "offsets": [0, 1, 2]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [5]}}, {"event_type": "RWF", "arguments": [{"text": "entailment graphs", "nugget_type": "FEA", "argument_type": "Concern", "tokens": ["entailment", "graphs"], "offsets": [25, 26]}, {"text": "severe", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["severe"], "offsets": [30]}, {"text": "sparsity", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["sparsity"], "offsets": [31]}, {"text": "unreliability of distributional similarity", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unreliability", "of", 
"distributional", "similarity"], "offsets": [33, 34, 35, 36]}], "trigger": {"text": "suffers", "tokens": ["suffers"], "offsets": [28]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [38]}, {"text": "entailment graph with textual entailment and transitivity", "nugget_type": "APP", "argument_type": "Content", "tokens": ["entailment", "graph", "with", "textual", "entailment", "and", "transitivity"], "offsets": [46, 47, 48, 49, 50, 51, 52]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [39]}}, {"event_type": "MDS", "arguments": [{"text": "textual entailment", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["textual", "entailment"], "offsets": [66, 67]}, {"text": "learns", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learns"], "offsets": [58]}], "trigger": {"text": "recognizing", "tokens": ["recognizing"], "offsets": [64]}}, {"event_type": "PUR", "arguments": [{"text": "local entailment relations", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["local", "entailment", "relations"], "offsets": [60, 61, 62]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "typed ccg - parsed predicates", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["typed", "ccg", "-", "parsed", "predicates"], "offsets": [73, 74, 75, 76, 77]}, {"text": "template sentences", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["template", "sentences"], "offsets": [69, 70]}], "trigger": {"text": "formed", "tokens": ["formed"], "offsets": [71]}}, {"event_type": "WKS", "arguments": [{"text": "three novel soft transitivity constraints", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["three", "novel", "soft", "transitivity", "constraints"], "offsets": [89, 90, 91, 92, 93]}, {"text": "consider", "nugget_type": "E-PUR", "argument_type": 
"Target", "tokens": ["consider"], "offsets": [95]}, {"text": "based on the generated local graph", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "the", "generated", "local", "graph"], "offsets": [79, 80, 81, 82, 83, 84]}], "trigger": {"text": "uses", "tokens": ["uses"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "logical transitivity", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["logical", "transitivity"], "offsets": [97, 98]}, {"text": "in entailment structures", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "entailment", "structures"], "offsets": [99, 100, 101]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [95]}}, {"event_type": "FIN", "arguments": [{"text": "model", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["model"], "offsets": [112]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [107]}}, {"event_type": "FAC", "arguments": [{"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["benchmark", "datasets"], "offsets": [105, 106]}, {"text": "entailment graph with textual entailment and transitivity", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["entailment", "graph", "with", "textual", "entailment", "and", "transitivity"], "offsets": [46, 47, 48, 49, 50, 51, 52]}, {"text": "well", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["well"], "offsets": [111]}, {"text": "transitivity", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["transitivity"], "offsets": [114]}, {"text": "entailment graph", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["entailment", "graph"], "offsets": [116, 117]}, {"text": "alleviate", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["alleviate"], "offsets": [119]}], "trigger": {"text": "model", "tokens": ["model"], "offsets": [112]}}, {"event_type": "PUR", "arguments": [{"text": "sparsity", 
"nugget_type": "TAK", "argument_type": "Aim", "tokens": ["sparsity"], "offsets": [121]}], "trigger": {"text": "alleviate", "tokens": ["alleviate"], "offsets": [119]}}, {"event_type": "FIN", "arguments": [{"text": "leads", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["leads"], "offsets": [124]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [107]}}, {"event_type": "CMP", "arguments": [{"text": "signifcant", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["signifcant"], "offsets": [126]}, {"text": "improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improvement"], "offsets": [127]}, {"text": "current state - of - the - art methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["current", "state", "-", "of", "-", "the", "-", "art", "methods"], "offsets": [129, 130, 131, 132, 133, 134, 135, 136, 137]}, {"text": "entailment graph with textual entailment and transitivity", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["entailment", "graph", "with", "textual", "entailment", "and", "transitivity"], "offsets": [46, 47, 48, 49, 50, 51, 52]}, {"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["benchmark", "datasets"], "offsets": [105, 106]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [124]}}], "document": ["typed", "entailment", "graphs", "try", "to", "learn", "the", "entailment", "relations", "between", "predicates", "from", "text", "and", "model", "them", "as", "edges", "between", "predicate", "nodes", ".", "the", "construction", "of", "entailment", "graphs", "usually", "suffers", "from", "severe", "sparsity", "and", "unreliability", "of", "distributional", "similarity", ".", "we", "propose", "a", "two", "-", "stage", "method", ",", "entailment", "graph", "with", "textual", "entailment", "and", "transitivity", "(", "egt2", ")", ".", "egt2", "learns", "the", "local", "entailment", "relations", "by", "recognizing", "the", 
"textual", "entailment", "between", "template", "sentences", "formed", "by", "typed", "ccg", "-", "parsed", "predicates", ".", "based", "on", "the", "generated", "local", "graph", ",", "egt2", "then", "uses", "three", "novel", "soft", "transitivity", "constraints", "to", "consider", "the", "logical", "transitivity", "in", "entailment", "structures", ".", "experiments", "on", "benchmark", "datasets", "show", "that", "egt2", "can", "well", "model", "the", "transitivity", "in", "entailment", "graph", "to", "alleviate", "the", "sparsity", ",", "and", "leads", "to", "signifcant", "improvement", "over", "current", "state", "-", "of", "-", "the", "-", "art", "methods", "."]}, {"venue": "ACL", "title": "Composable Sparse Fine-Tuning for Cross-Lingual Transfer", "abstract": "Fine-tuning the entire set of parameters of a large pretrained model has become the mainstream approach for transfer learning. To increase its efficiency and prevent catastrophic forgetting and interference, techniques like adapters and sparse fine-tuning have been developed. Adapters are modular, as they can be combined to adapt a model towards different facets of knowledge (e.g., dedicated language and/or task adapters). Sparse fine-tuning is expressive, as it controls the behavior of all model components. In this work, we introduce a new fine-tuning method with both these desirable properties. In particular, we learn sparse, real-valued masks based on a simple variant of the Lottery Ticket Hypothesis. Task-specific masks are obtained from annotated data in a source language, and language-specific masks from masked language modeling in a target language. Both these masks can then be composed with the pretrained model. Unlike adapter-based fine-tuning, this method neither increases the number of parameters at inference time nor alters the original model architecture. 
Most importantly, it outperforms adapters in zero-shot cross-lingual transfer by a large margin in a series of multilingual benchmarks, including Universal Dependencies, MasakhaNER, and AmericasNLI. Based on an in-depth analysis, we additionally find that sparsity is crucial to prevent both 1) interference between the fine-tunings to be composed and 2) overfitting. We release the code and models at https://github.com/cambridgeltl/composable-sft.", "doc_id": "7f72c1fcaae7fcba398838759f20126f", "publication_year": 2022, "sentences": ["fine - tuning the entire set of parameters of a large pretrained model has become the mainstream approach for transfer learning .", "to increase its efficiency and prevent catastrophic forgetting and interference , techniques like adapters and sparse fine - tuning have been developed .", "adapters are modular , as they can be combined to adapt a model towards different facets of knowledge ( e . g . , dedicated language and / or task adapters ) .", "sparse fine - tuning is expressive , as it controls the behavior of all model components .", "in this work , we introduce a new fine - tuning method with both these desirable properties .", "in particular , we learn sparse , real - valued masks based on a simple variant of the lottery ticket hypothesis .", "task - specific masks are obtained from annotated data in a source language , and language - specific masks from masked language modeling in a target language .", "both these masks can then be composed with the pretrained model .", "unlike adapter - based fine - tuning , this method neither increases the number of parameters at inference time nor alters the original model architecture .", "most importantly , it outperforms adapters in zero - shot cross - lingual transfer by a large margin in a series of multilingual benchmarks , including universal dependencies , masakhaner , and americasnli .", "based on an in - depth analysis , we additionally find that sparsity is crucial to prevent 
both 1 ) interference between the fine - tunings to be composed and 2 ) overfitting .", "we release the code and models at https : / / github . com / cambridgeltl / composable - sft ."], "events": [{"event_type": "ITT", "arguments": [{"text": "sparse fine - tuning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["sparse", "fine", "-", "tuning"], "offsets": [78, 79, 80, 81]}], "trigger": {"text": "expressive", "tokens": ["expressive"], "offsets": [83]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [99]}, {"text": "fine - tuning method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["fine", "-", "tuning", "method"], "offsets": [103, 104, 105, 106]}], "trigger": {"text": "introduce", "tokens": ["introduce"], "offsets": [100]}}, {"event_type": "MDS", "arguments": [{"text": "sparse , real - valued masks", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["sparse", ",", "real", "-", "valued", "masks"], "offsets": [118, 119, 120, 121, 122, 123]}, {"text": "based on a simple variant of the lottery ticket hypothesis", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "a", "simple", "variant", "of", "the", "lottery", "ticket", "hypothesis"], "offsets": [124, 125, 126, 127, 128, 129, 130, 131, 132, 133]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [117]}}, {"event_type": "MDS", "arguments": [{"text": "annotated data", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["annotated", "data"], "offsets": [142, 143]}, {"text": "task - specific masks", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["task", "-", "specific", "masks"], "offsets": [135, 136, 137, 138]}, {"text": "language - specific masks", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["language", "-", "specific", "masks"], "offsets": [150, 151, 152, 153]}, {"text": "masked language 
modeling", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["masked", "language", "modeling"], "offsets": [155, 156, 157]}, {"text": "in a source language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "source", "language"], "offsets": [144, 145, 146, 147]}, {"text": "in a target language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "target", "language"], "offsets": [158, 159, 160, 161]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [140]}}, {"event_type": "MDS", "arguments": [{"text": "pretrained model", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["pretrained", "model"], "offsets": [172, 173]}, {"text": "task - specific masks", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["task", "-", "specific", "masks"], "offsets": [135, 136, 137, 138]}, {"text": "language - specific masks", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["language", "-", "specific", "masks"], "offsets": [150, 151, 152, 153]}], "trigger": {"text": "composed", "tokens": ["composed"], "offsets": [169]}}, {"event_type": "CMP", "arguments": [{"text": "adapters", "nugget_type": "MOD", "argument_type": "Arg2", "tokens": ["adapters"], "offsets": [206]}, {"text": "zero - shot cross - lingual transfer", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["zero", "-", "shot", "cross", "-", "lingual", "transfer"], "offsets": [208, 209, 210, 211, 212, 213, 214]}, {"text": "in a series of multilingual benchmarks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "a", "series", "of", "multilingual", "benchmarks"], "offsets": [219, 220, 221, 222, 223, 224]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [205]}, {"text": "fine - tuning method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["fine", "-", "tuning", "method"], "offsets": [103, 104, 
105, 106]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [205]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [243]}, {"text": "crucial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["crucial"], "offsets": [249]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [245]}}, {"event_type": "FAC", "arguments": [{"text": "sparsity", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["sparsity"], "offsets": [247]}, {"text": "prevent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["prevent"], "offsets": [251]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [249]}}, {"event_type": "PUR", "arguments": [{"text": "interference", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interference"], "offsets": [255]}, {"text": "overfitting", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["overfitting"], "offsets": [267]}, {"text": "between the fine - tunings to be composed", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["between", "the", "fine", "-", "tunings", "to", "be", "composed"], "offsets": [256, 257, 258, 259, 260, 261, 262, 263]}], "trigger": {"text": "prevent", "tokens": ["prevent"], "offsets": [251]}}], "document": ["fine", "-", "tuning", "the", "entire", "set", "of", "parameters", "of", "a", "large", "pretrained", "model", "has", "become", "the", "mainstream", "approach", "for", "transfer", "learning", ".", "to", "increase", "its", "efficiency", "and", "prevent", "catastrophic", "forgetting", "and", "interference", ",", "techniques", "like", "adapters", "and", "sparse", "fine", "-", "tuning", "have", "been", "developed", ".", "adapters", "are", "modular", ",", "as", "they", "can", "be", "combined", "to", "adapt", "a", "model", "towards", "different", "facets", "of", "knowledge", "(", "e", ".", "g", ".", ",", "dedicated", "language", "and", "/", "or", 
"task", "adapters", ")", ".", "sparse", "fine", "-", "tuning", "is", "expressive", ",", "as", "it", "controls", "the", "behavior", "of", "all", "model", "components", ".", "in", "this", "work", ",", "we", "introduce", "a", "new", "fine", "-", "tuning", "method", "with", "both", "these", "desirable", "properties", ".", "in", "particular", ",", "we", "learn", "sparse", ",", "real", "-", "valued", "masks", "based", "on", "a", "simple", "variant", "of", "the", "lottery", "ticket", "hypothesis", ".", "task", "-", "specific", "masks", "are", "obtained", "from", "annotated", "data", "in", "a", "source", "language", ",", "and", "language", "-", "specific", "masks", "from", "masked", "language", "modeling", "in", "a", "target", "language", ".", "both", "these", "masks", "can", "then", "be", "composed", "with", "the", "pretrained", "model", ".", "unlike", "adapter", "-", "based", "fine", "-", "tuning", ",", "this", "method", "neither", "increases", "the", "number", "of", "parameters", "at", "inference", "time", "nor", "alters", "the", "original", "model", "architecture", ".", "most", "importantly", ",", "it", "outperforms", "adapters", "in", "zero", "-", "shot", "cross", "-", "lingual", "transfer", "by", "a", "large", "margin", "in", "a", "series", "of", "multilingual", "benchmarks", ",", "including", "universal", "dependencies", ",", "masakhaner", ",", "and", "americasnli", ".", "based", "on", "an", "in", "-", "depth", "analysis", ",", "we", "additionally", "find", "that", "sparsity", "is", "crucial", "to", "prevent", "both", "1", ")", "interference", "between", "the", "fine", "-", "tunings", "to", "be", "composed", "and", "2", ")", "overfitting", ".", "we", "release", "the", "code", "and", "models", "at", "https", ":", "/", "/", "github", ".", "com", "/", "cambridgeltl", "/", "composable", "-", "sft", "."]}, {"venue": "ACL", "title": "Open Vocabulary Learning for Neural Chinese Pinyin IME", "abstract": "Pinyin-to-character (P2C) conversion is the core component of 
pinyin-based Chinese input method engine (IME). However, the conversion is seriously compromised by the ambiguities of Chinese characters corresponding to pinyin as well as the predefined fixed vocabularies. To alleviate such inconveniences, we propose a neural P2C conversion model augmented by an online updated vocabulary with a sampling mechanism to support open vocabulary learning during IME working. Our experiments show that the proposed method outperforms commercial IMEs and state-of-the-art traditional models on standard corpus and true inputting history dataset in terms of multiple metrics and thus the online updated vocabulary indeed helps our IME effectively follows user inputting behavior.", "doc_id": "fa4b1d9102d7dc6fdc4fe71f072e36aa", "publication_year": 2019, "sentences": ["pinyin - to - character ( p2c ) conversion is the core component of pinyin - based chinese input method engine ( ime ) .", "however , the conversion is seriously compromised by the ambiguities of chinese characters corresponding to pinyin as well as the predefined fixed vocabularies .", "to alleviate such inconveniences , we propose a neural p2c conversion model augmented by an online updated vocabulary with a sampling mechanism to support open vocabulary learning during ime working .", "our experiments show that the proposed method outperforms commercial imes and state - of - the - art traditional models on standard corpus and true inputting history dataset in terms of multiple metrics and thus the online updated vocabulary indeed helps our ime effectively follows user inputting behavior ."], "events": [{"event_type": "ITT", "arguments": [{"text": "pinyin - based chinese input method", "nugget_type": "APP", "argument_type": "Target", "tokens": ["pinyin", "-", "based", "chinese", "input", "method"], "offsets": [14, 15, 16, 17, 18, 19]}], "trigger": {"text": "component", "tokens": ["component"], "offsets": [12]}}, {"event_type": "RWF", "arguments": [{"text": "ambiguities of chinese 
characters", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["ambiguities", "of", "chinese", "characters"], "offsets": [34, 35, 36, 37]}, {"text": "conversion", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["pinyin", "-", "to", "-", "character", "conversion"], "offsets": [0, 1, 2, 3, 4, 8]}, {"text": "compromised", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["compromised"], "offsets": [31]}, {"text": "seriously", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["seriously"], "offsets": [30]}], "trigger": {"text": "compromised", "tokens": ["compromised"], "offsets": [31]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [54]}, {"text": "neural p2c conversion model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "p2c", "conversion", "model"], "offsets": [57, 58, 59, 60]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [55]}}, {"event_type": "MDS", "arguments": [{"text": "online updated vocabulary", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["online", "updated", "vocabulary"], "offsets": [64, 65, 66]}, {"text": "during ime working", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "input", "method", "engine", "working"], "offsets": [76, 18, 19, 20, 78]}, {"text": "open vocabulary learning", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["open", "vocabulary", "learning"], "offsets": [73, 74, 75]}, {"text": "with a sampling mechanism", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "a", "sampling", "mechanism"], "offsets": [67, 68, 69, 70]}], "trigger": {"text": "support", "tokens": ["support"], "offsets": [72]}}, {"event_type": "CMP", "arguments": [{"text": "commercial imes", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["commercial", "imes"], "offsets": [88, 89]}, {"text": "state - of - 
the - art traditional models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "traditional", "models"], "offsets": [91, 92, 93, 94, 95, 96, 97, 98, 99]}, {"text": "standard corpus", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["standard", "corpus"], "offsets": [101, 102]}, {"text": "true inputting history dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["true", "inputting", "history", "dataset"], "offsets": [104, 105, 106, 107]}, {"text": "multiple metrics", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["multiple", "metrics"], "offsets": [111, 112]}, {"text": "neural p2c conversion model", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["neural", "p2c", "conversion", "model"], "offsets": [57, 58, 59, 60]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [87]}}, {"event_type": "FAC", "arguments": [{"text": "online updated vocabulary", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["online", "updated", "vocabulary"], "offsets": [116, 117, 118]}, {"text": "follows", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["follows"], "offsets": [124]}, {"text": "ime", "nugget_type": "APP", "argument_type": "Object", "tokens": ["input", "method", "engine"], "offsets": [18, 19, 20]}], "trigger": {"text": "helps", "tokens": ["helps"], "offsets": [120]}}, {"event_type": "PUR", "arguments": [{"text": "user inputting behavior", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["user", "inputting", "behavior"], "offsets": [125, 126, 127]}], "trigger": {"text": "follows", "tokens": ["follows"], "offsets": [124]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [87]}, {"text": "helps", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["helps"], "offsets": [120]}], "trigger": {"text": "show", 
"tokens": ["show"], "offsets": [82]}}], "document": ["pinyin", "-", "to", "-", "character", "(", "p2c", ")", "conversion", "is", "the", "core", "component", "of", "pinyin", "-", "based", "chinese", "input", "method", "engine", "(", "ime", ")", ".", "however", ",", "the", "conversion", "is", "seriously", "compromised", "by", "the", "ambiguities", "of", "chinese", "characters", "corresponding", "to", "pinyin", "as", "well", "as", "the", "predefined", "fixed", "vocabularies", ".", "to", "alleviate", "such", "inconveniences", ",", "we", "propose", "a", "neural", "p2c", "conversion", "model", "augmented", "by", "an", "online", "updated", "vocabulary", "with", "a", "sampling", "mechanism", "to", "support", "open", "vocabulary", "learning", "during", "ime", "working", ".", "our", "experiments", "show", "that", "the", "proposed", "method", "outperforms", "commercial", "imes", "and", "state", "-", "of", "-", "the", "-", "art", "traditional", "models", "on", "standard", "corpus", "and", "true", "inputting", "history", "dataset", "in", "terms", "of", "multiple", "metrics", "and", "thus", "the", "online", "updated", "vocabulary", "indeed", "helps", "our", "ime", "effectively", "follows", "user", "inputting", "behavior", "."]}, {"venue": "ACL", "title": "Robust Knowledge Graph Completion with Stacked Convolutions and a Student Re-Ranking Network", "abstract": "Knowledge Graph (KG) completion research usually focuses on densely connected benchmark datasets that are not representative of real KGs. We curate two KG datasets that include biomedical and encyclopedic knowledge and use an existing commonsense KG dataset to explore KG completion in the more realistic setting where dense connectivity is not guaranteed. We develop a deep convolutional network that utilizes textual entity representations and demonstrate that our model outperforms recent KG completion methods in this challenging setting. 
We find that our model\u2019s performance improvements stem primarily from its robustness to sparsity. We then distill the knowledge from the convolutional network into a student network that re-ranks promising candidate entities. This re-ranking stage leads to further improvements in performance and demonstrates the effectiveness of entity re-ranking for KG completion.", "doc_id": "f759023d55386ba9f605e8bc41778619", "publication_year": 2021, "sentences": ["knowledge graph ( kg ) completion research usually focuses on densely connected benchmark datasets that are not representative of real kgs .", "we curate two kg datasets that include biomedical and encyclopedic knowledge and use an existing commonsense kg dataset to explore kg completion in the more realistic setting where dense connectivity is not guaranteed .", "we develop a deep convolutional network that utilizes textual entity representations and demonstrate that our model outperforms recent kg completion methods in this challenging setting .", "we find that our model \u2019 s performance improvements stem primarily from its robustness to sparsity .", "we then distill the knowledge from the convolutional network into a student network that re - ranks promising candidate entities .", "this re - ranking stage leads to further improvements in performance and demonstrates the effectiveness of entity re - ranking for kg completion ."], "events": [{"event_type": "ITT", "arguments": [{"text": "knowledge graph ( kg ) completion research", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["knowledge", "graph", "completion", "research"], "offsets": [0, 1, 5, 6]}], "trigger": {"text": "focuses", "tokens": ["focuses"], "offsets": [8]}}, {"event_type": "RWF", "arguments": [{"text": "not representative", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "representative"], "offsets": [16, 17]}, {"text": "benchmark datasets", "nugget_type": "DST", "argument_type": "Concern", "tokens": 
["benchmark", "datasets"], "offsets": [12, 13]}], "trigger": {"text": "not representative", "tokens": ["not", "representative"], "offsets": [16, 17]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [22]}, {"text": "two kg datasets", "nugget_type": "DST", "argument_type": "Content", "tokens": ["two", "knowledge", "graph", "datasets"], "offsets": [24, 0, 1, 26]}], "trigger": {"text": "curate", "tokens": ["curate"], "offsets": [23]}}, {"event_type": "MDS", "arguments": [{"text": "existing commonsense kg dataset", "nugget_type": "DST", "argument_type": "BaseComponent", "tokens": ["existing", "commonsense", "kg", "dataset"], "offsets": [36, 37, 38, 39]}, {"text": "explore", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["explore"], "offsets": [41]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [34]}}, {"event_type": "PUR", "arguments": [{"text": "kg completion", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["knowledge", "graph", "completion"], "offsets": [0, 1, 43]}, {"text": "in the more realistic setting where dense connectivity is not guaranteed", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "more", "realistic", "setting", "where", "dense", "connectivity", "is", "not", "guaranteed"], "offsets": [44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54]}], "trigger": {"text": "explore", "tokens": ["explore"], "offsets": [41]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [56]}, {"text": "deep convolutional network", "nugget_type": "APP", "argument_type": "Content", "tokens": ["deep", "convolutional", "network"], "offsets": [59, 60, 61]}], "trigger": {"text": "develop", "tokens": ["develop"], "offsets": [57]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [56]}, 
{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [72]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [68]}}, {"event_type": "CMP", "arguments": [{"text": "deep convolutional network", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["deep", "convolutional", "network"], "offsets": [59, 60, 61]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [72]}, {"text": "recent kg completion methods", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["recent", "knowledge", "graph", "completion", "methods"], "offsets": [73, 0, 1, 75, 76]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [72]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [82]}, {"text": "stem", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["stem"], "offsets": [91]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [83]}}, {"event_type": "FAC", "arguments": [{"text": "robustness to sparsity", "nugget_type": "STR", "argument_type": "Object", "tokens": ["robustness", "to", "sparsity"], "offsets": [95, 96, 97]}, {"text": "performance improvements", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["performance", "improvements"], "offsets": [89, 90]}], "trigger": {"text": "stem", "tokens": ["stem"], "offsets": [91]}}, {"event_type": "MDS", "arguments": [{"text": "knowledge", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["knowledge"], "offsets": [103]}, {"text": "student network", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["student", "network"], "offsets": [110, 111]}], "trigger": {"text": "distill", "tokens": ["distill"], "offsets": [101]}}, {"event_type": "FAC", "arguments": [{"text": "re - ranking stage", "nugget_type": "MOD", "argument_type": 
"Subject", "tokens": ["re", "-", "ranking", "stage"], "offsets": [121, 122, 123, 124]}, {"text": "improvements in performance", "nugget_type": "STR", "argument_type": "Object", "tokens": ["improvements", "in", "performance"], "offsets": [128, 129, 130]}], "trigger": {"text": "leads", "tokens": ["leads"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "effectiveness of entity re - ranking", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness", "of", "entity", "re", "-", "ranking"], "offsets": [134, 135, 136, 137, 138, 139]}, {"text": "kg completion", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["knowledge", "graph", "completion"], "offsets": [0, 1, 142]}], "trigger": {"text": "demonstrates", "tokens": ["demonstrates"], "offsets": [132]}}], "document": ["knowledge", "graph", "(", "kg", ")", "completion", "research", "usually", "focuses", "on", "densely", "connected", "benchmark", "datasets", "that", "are", "not", "representative", "of", "real", "kgs", ".", "we", "curate", "two", "kg", "datasets", "that", "include", "biomedical", "and", "encyclopedic", "knowledge", "and", "use", "an", "existing", "commonsense", "kg", "dataset", "to", "explore", "kg", "completion", "in", "the", "more", "realistic", "setting", "where", "dense", "connectivity", "is", "not", "guaranteed", ".", "we", "develop", "a", "deep", "convolutional", "network", "that", "utilizes", "textual", "entity", "representations", "and", "demonstrate", "that", "our", "model", "outperforms", "recent", "kg", "completion", "methods", "in", "this", "challenging", "setting", ".", "we", "find", "that", "our", "model", "\u2019", "s", "performance", "improvements", "stem", "primarily", "from", "its", "robustness", "to", "sparsity", ".", "we", "then", "distill", "the", "knowledge", "from", "the", "convolutional", "network", "into", "a", "student", "network", "that", "re", "-", "ranks", "promising", "candidate", "entities", ".", "this", "re", "-", "ranking", 
"stage", "leads", "to", "further", "improvements", "in", "performance", "and", "demonstrates", "the", "effectiveness", "of", "entity", "re", "-", "ranking", "for", "kg", "completion", "."]}, {"venue": "ACL", "title": "S4-Tuning: A Simple Cross-lingual Sub-network Tuning Method", "abstract": "The emergence of multilingual pre-trained language models makes it possible to adapt to target languages with only few labeled examples.However, vanilla fine-tuning tends to achieve degenerated and unstable results, owing to the Language Interference among different languages, and Parameter Overload under the few-sample transfer learning scenarios.To address two problems elegantly, we propose S4-Tuning, a Simple Cross-lingual Sub-network Tuning method. S4-Tuning first detects the most essential sub-network for each target language, and only updates it during fine-tuning.In this way, the language sub-networks lower the scale of trainable parameters, and hence better suit the low-resource scenarios.Meanwhile, the commonality and characteristics across languages are modeled by the overlapping and non-overlapping parts to ease the interference among languages.Simple but effective, S4-Tuning gains consistent improvements over vanilla fine-tuning on three multi-lingual tasks involving 37 different languages in total (XNLI, PAWS-X, and Tatoeba).", "doc_id": "ab48245cf7dac23e4294cf9ab184327d", "publication_year": 2022, "sentences": ["the emergence of multilingual pre - trained language models makes it possible to adapt to target languages with only few labeled examples .", "however , vanilla fine - tuning tends to achieve degenerated and unstable results , owing to the language interference among different languages , and parameter overload under the few - sample transfer learning scenarios .", "to address two problems elegantly , we propose s4 - tuning , a simple cross - lingual sub - network tuning method .", "s4 - tuning first detects the most essential sub - network for each target 
language , and only updates it during fine - tuning .", "in this way , the language sub - networks lower the scale of trainable parameters , and hence better suit the low - resource scenarios .", "meanwhile , the commonality and characteristics across languages are modeled by the overlapping and non - overlapping parts to ease the interference among languages .", "simple but effective , s4 - tuning gains consistent improvements over vanilla fine - tuning on three multi - lingual tasks involving 37 different languages in total ( xnli , paws - x , and tatoeba ) ."], "events": [{"event_type": "ITT", "arguments": [{"text": "multilingual pre - trained language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["multilingual", "pre", "-", "trained", "language", "models"], "offsets": [3, 4, 5, 6, 7, 8]}], "trigger": {"text": "makes", "tokens": ["makes"], "offsets": [9]}}, {"event_type": "RWF", "arguments": [{"text": "language interference", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["language", "interference"], "offsets": [40, 41]}, {"text": "parameter overload", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["parameter", "overload"], "offsets": [47, 48]}], "trigger": {"text": "owing", "tokens": ["owing"], "offsets": [37]}}, {"event_type": "PRP", "arguments": [{"text": "address", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["address"], "offsets": [59]}, {"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [64]}, {"text": "simple cross - lingual sub - network tuning method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simple", "cross", "-", "lingual", "sub", "-", "network", "tuning", "method"], "offsets": [71, 72, 73, 74, 75, 76, 77, 78, 79]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "degenerated and unstable results", "nugget_type": "WEA", "argument_type": "Aim", "tokens": 
["degenerated", "and", "unstable", "results"], "offsets": [32, 33, 34, 35]}], "trigger": {"text": "address", "tokens": ["address"], "offsets": [59]}}, {"event_type": "CMP", "arguments": [{"text": "vanilla fine - tuning", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["vanilla", "fine", "-", "tuning"], "offsets": [168, 169, 170, 171]}, {"text": "consistent improvements", "nugget_type": "STR", "argument_type": "Result", "tokens": ["consistent", "improvements"], "offsets": [165, 166]}, {"text": "s4 - tuning", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["s4", "-", "tuning"], "offsets": [161, 162, 163]}, {"text": "on three multi - lingual tasks involving 37 different languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "three", "multi", "-", "lingual", "tasks", "involving", "37", "different", "languages"], "offsets": [172, 173, 174, 175, 176, 177, 178, 179, 180, 181]}], "trigger": {"text": "gains", "tokens": ["gains"], "offsets": [164]}}, {"event_type": "RWF", "arguments": [{"text": "vanilla fine - tuning", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["vanilla", "fine", "-", "tuning"], "offsets": [25, 26, 27, 28]}, {"text": "degenerated", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["degenerated"], "offsets": [32]}, {"text": "unstable results", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unstable", "results"], "offsets": [34, 35]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [31]}}, {"event_type": "MDS", "arguments": [{"text": "most essential sub - network", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["most", "essential", "sub", "-", "network"], "offsets": [87, 88, 89, 90, 91]}, {"text": "each target language", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["each", "target", "language"], "offsets": [93, 94, 95]}], "trigger": {"text": "detects", "tokens": ["detects"], "offsets": [85]}}, {"event_type": "MDS", 
"arguments": [{"text": "most essential sub - network", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["most", "essential", "sub", "-", "network"], "offsets": [87, 88, 89, 90, 91]}, {"text": "during fine - tuning", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["during", "fine", "-", "tuning"], "offsets": [101, 102, 103, 104]}], "trigger": {"text": "only updates", "tokens": ["only", "updates"], "offsets": [98, 99]}}, {"event_type": "MDS", "arguments": [{"text": "ease", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["ease"], "offsets": [151]}, {"text": "overlapping parts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["overlapping", "parts"], "offsets": [144, 149]}, {"text": "non - overlapping parts", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["non", "-", "overlapping", "parts"], "offsets": [146, 147, 148, 149]}, {"text": "commonality and characteristics across languages", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["commonality", "and", "characteristics", "across", "languages"], "offsets": [135, 136, 137, 138, 139]}], "trigger": {"text": "modeled", "tokens": ["modeled"], "offsets": [141]}}, {"event_type": "PUR", "arguments": [{"text": "interference among languages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["interference", "among", "languages"], "offsets": [153, 154, 155]}], "trigger": {"text": "ease", "tokens": ["ease"], "offsets": [151]}}], "document": ["the", "emergence", "of", "multilingual", "pre", "-", "trained", "language", "models", "makes", "it", "possible", "to", "adapt", "to", "target", "languages", "with", "only", "few", "labeled", "examples", ".", "however", ",", "vanilla", "fine", "-", "tuning", "tends", "to", "achieve", "degenerated", "and", "unstable", "results", ",", "owing", "to", "the", "language", "interference", "among", "different", "languages", ",", "and", "parameter", "overload", "under", "the", "few", "-", 
"sample", "transfer", "learning", "scenarios", ".", "to", "address", "two", "problems", "elegantly", ",", "we", "propose", "s4", "-", "tuning", ",", "a", "simple", "cross", "-", "lingual", "sub", "-", "network", "tuning", "method", ".", "s4", "-", "tuning", "first", "detects", "the", "most", "essential", "sub", "-", "network", "for", "each", "target", "language", ",", "and", "only", "updates", "it", "during", "fine", "-", "tuning", ".", "in", "this", "way", ",", "the", "language", "sub", "-", "networks", "lower", "the", "scale", "of", "trainable", "parameters", ",", "and", "hence", "better", "suit", "the", "low", "-", "resource", "scenarios", ".", "meanwhile", ",", "the", "commonality", "and", "characteristics", "across", "languages", "are", "modeled", "by", "the", "overlapping", "and", "non", "-", "overlapping", "parts", "to", "ease", "the", "interference", "among", "languages", ".", "simple", "but", "effective", ",", "s4", "-", "tuning", "gains", "consistent", "improvements", "over", "vanilla", "fine", "-", "tuning", "on", "three", "multi", "-", "lingual", "tasks", "involving", "37", "different", "languages", "in", "total", "(", "xnli", ",", "paws", "-", "x", ",", "and", "tatoeba", ")", "."]}, {"venue": "ACL", "title": "SeqVAT: Virtual Adversarial Training for Semi-Supervised Sequence Labeling", "abstract": "Virtual adversarial training (VAT) is a powerful technique to improve model robustness in both supervised and semi-supervised settings. It is effective and can be easily adopted on lots of image classification and text classification tasks. However, its benefits to sequence labeling tasks such as named entity recognition (NER) have not been shown as significant, mostly, because the previous approach can not combine VAT with the conditional random field (CRF). CRF can significantly boost accuracy for sequence models by putting constraints on label transitions, which makes it an essential component in most state-of-the-art sequence labeling model architectures. 
In this paper, we propose SeqVAT, a method which naturally applies VAT to sequence labeling models with CRF. Empirical studies show that SeqVAT not only significantly improves the sequence labeling performance over baselines under supervised settings, but also outperforms state-of-the-art approaches under semi-supervised settings.", "doc_id": "de35f0b671cf6e32399190f3c614ba6c", "publication_year": 2020, "sentences": ["virtual adversarial training ( vat ) is a powerful technique to improve model robustness in both supervised and semi - supervised settings .", "it is effective and can be easily adopted on lots of image classification and text classification tasks .", "however , its benefits to sequence labeling tasks such as named entity recognition ( ner ) have not been shown as significant , mostly , because the previous approach can not combine vat with the conditional random field ( crf ) .", "crf can significantly boost accuracy for sequence models by putting constraints on label transitions , which makes it an essential component in most state - of - the - art sequence labeling model architectures .", "in this paper , we propose seqvat , a method which naturally applies vat to sequence labeling models with crf .", "empirical studies show that seqvat not only significantly improves the sequence labeling performance over baselines under supervised settings , but also outperforms state - of - the - art approaches under semi - supervised settings ."], "events": [{"event_type": "ITT", "arguments": [{"text": "virtual adversarial training", "nugget_type": "APP", "argument_type": "Target", "tokens": ["virtual", "adversarial", "training"], "offsets": [0, 1, 2]}], "trigger": {"text": "technique", "tokens": ["technique"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [122]}, {"text": "seqvat", "nugget_type": "APP", "argument_type": "Content", "tokens": ["seqvat"], 
"offsets": [124]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [123]}}, {"event_type": "MDS", "arguments": [{"text": "virtual adversarial training", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["virtual", "adversarial", "training"], "offsets": [0, 1, 2]}, {"text": "sequence labeling models with crf", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["sequence", "labeling", "models", "with", "conditional", "random", "field"], "offsets": [133, 134, 135, 136, 76, 77, 78]}], "trigger": {"text": "applies", "tokens": ["applies"], "offsets": [130]}}, {"event_type": "FIN", "arguments": [{"text": "outperforms", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["outperforms"], "offsets": [160]}, {"text": "significantly improves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["significantly", "improves"], "offsets": [146, 147]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [141]}}, {"event_type": "CMP", "arguments": [{"text": "seqvat", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["seqvat"], "offsets": [143]}, {"text": "outperforms", "nugget_type": "STR", "argument_type": "Result", "tokens": ["outperforms"], "offsets": [160]}, {"text": "state - of - the - art approaches", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["state", "-", "of", "-", "the", "-", "art", "approaches"], "offsets": [161, 162, 163, 164, 165, 166, 167, 168]}], "trigger": {"text": "outperforms", "tokens": ["outperforms"], "offsets": [160]}}, {"event_type": "RWF", "arguments": [{"text": "previous approach", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["previous", "approach"], "offsets": [68, 69]}, {"text": "not combine", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "combine"], "offsets": [71, 72]}, {"text": "vat", "nugget_type": "APP", "argument_type": "Fault", "tokens": ["virtual", "adversarial", "training"], "offsets": [0, 1, 2]}], "trigger": 
{"text": "not combine", "tokens": ["not", "combine"], "offsets": [71, 72]}}, {"event_type": "FAC", "arguments": [{"text": "seqvat", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["seqvat"], "offsets": [143]}, {"text": "sequence labeling performance over baselines", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["sequence", "labeling", "performance", "over", "baselines"], "offsets": [149, 150, 151, 152, 153]}, {"text": "under supervised settings", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["under", "supervised", "settings"], "offsets": [154, 155, 156]}], "trigger": {"text": "significantly improves", "tokens": ["significantly", "improves"], "offsets": [146, 147]}}], "document": ["virtual", "adversarial", "training", "(", "vat", ")", "is", "a", "powerful", "technique", "to", "improve", "model", "robustness", "in", "both", "supervised", "and", "semi", "-", "supervised", "settings", ".", "it", "is", "effective", "and", "can", "be", "easily", "adopted", "on", "lots", "of", "image", "classification", "and", "text", "classification", "tasks", ".", "however", ",", "its", "benefits", "to", "sequence", "labeling", "tasks", "such", "as", "named", "entity", "recognition", "(", "ner", ")", "have", "not", "been", "shown", "as", "significant", ",", "mostly", ",", "because", "the", "previous", "approach", "can", "not", "combine", "vat", "with", "the", "conditional", "random", "field", "(", "crf", ")", ".", "crf", "can", "significantly", "boost", "accuracy", "for", "sequence", "models", "by", "putting", "constraints", "on", "label", "transitions", ",", "which", "makes", "it", "an", "essential", "component", "in", "most", "state", "-", "of", "-", "the", "-", "art", "sequence", "labeling", "model", "architectures", ".", "in", "this", "paper", ",", "we", "propose", "seqvat", ",", "a", "method", "which", "naturally", "applies", "vat", "to", "sequence", "labeling", "models", "with", "crf", ".", "empirical", "studies", "show", "that", 
"seqvat", "not", "only", "significantly", "improves", "the", "sequence", "labeling", "performance", "over", "baselines", "under", "supervised", "settings", ",", "but", "also", "outperforms", "state", "-", "of", "-", "the", "-", "art", "approaches", "under", "semi", "-", "supervised", "settings", "."]}, {"venue": "ACL", "title": "SParC: Cross-Domain Semantic Parsing in Context", "abstract": "We present SParC, a dataset for cross-domainSemanticParsing inContext that consists of 4,298 coherent question sequences (12k+ individual questions annotated with SQL queries). It is obtained from controlled user interactions with 200 complex databases over 138 domains. We provide an in-depth analysis of SParC and show that it introduces new challenges compared to existing datasets. SParC demonstrates complex contextual dependencies, (2) has greater semantic diversity, and (3) requires generalization to unseen domains due to its cross-domain nature and the unseen databases at test time. We experiment with two state-of-the-art text-to-SQL models adapted to the context-dependent, cross-domain setup. The best model obtains an exact match accuracy of 20.2% over all questions and less than10% over all interaction sequences, indicating that the cross-domain setting and the con-textual phenomena of the dataset present significant challenges for future research. 
The dataset, baselines, and leaderboard are released at https://yale-lily.github.io/sparc.", "doc_id": "aa09245bfd4e8fe1d7711e423556f5d8", "publication_year": 2019, "sentences": ["we present sparc , a dataset for cross - domainsemanticparsing incontext that consists of 4 , 298 coherent question sequences ( 12k + individual questions annotated with sql queries ) .", "it is obtained from controlled user interactions with 200 complex databases over 138 domains .", "we provide an in - depth analysis of sparc and show that it introduces new challenges compared to existing datasets .", "sparc demonstrates complex contextual dependencies , ( 2 ) has greater semantic diversity , and ( 3 ) requires generalization to unseen domains due to its cross - domain nature and the unseen databases at test time .", "we experiment with two state - of - the - art text - to - sql models adapted to the context - dependent , cross - domain setup .", "the best model obtains an exact match accuracy of 20 . 2 % over all questions and less than10 % over all interaction sequences , indicating that the cross - domain setting and the con - textual phenomena of the dataset present significant challenges for future research .", "the dataset , baselines , and leaderboard are released at https : / / yale - lily . github . 
io / sparc ."], "events": [{"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [0]}, {"text": "sparc", "nugget_type": "DST", "argument_type": "Content", "tokens": ["sparc"], "offsets": [2]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [1]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "in - depth analysis of sparc", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["in", "-", "depth", "analysis", "of", "sparc"], "offsets": [49, 50, 51, 52, 53, 54]}], "trigger": {"text": "provide", "tokens": ["provide"], "offsets": [47]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [46]}, {"text": "introduces", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["introduces"], "offsets": [59]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [56]}}, {"event_type": "CMP", "arguments": [{"text": "sparc", "nugget_type": "DST", "argument_type": "Arg1", "tokens": ["sparc"], "offsets": [54]}, {"text": "existing datasets", "nugget_type": "DST", "argument_type": "Arg2", "tokens": ["existing", "datasets"], "offsets": [64, 65]}, {"text": "new", "nugget_type": "STR", "argument_type": "Result", "tokens": ["new"], "offsets": [60]}, {"text": "challenges", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["challenges"], "offsets": [61]}], "trigger": {"text": "introduces", "tokens": ["introduces"], "offsets": [59]}}, {"event_type": "FAC", "arguments": [{"text": "sparc", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["sparc"], "offsets": [67]}, {"text": "complex contextual dependencies", "nugget_type": "FEA", "argument_type": "Object", "tokens": ["complex", "contextual", "dependencies"], "offsets": [69, 70, 71]}], "trigger": {"text": "demonstrates", 
"tokens": ["demonstrates"], "offsets": [68]}}, {"event_type": "FAC", "arguments": [{"text": "greater semantic diversity", "nugget_type": "STR", "argument_type": "Object", "tokens": ["greater", "semantic", "diversity"], "offsets": [77, 78, 79]}, {"text": "sparc", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["sparc"], "offsets": [67]}], "trigger": {"text": "has", "tokens": ["has"], "offsets": [76]}}, {"event_type": "FAC", "arguments": [{"text": "sparc", "nugget_type": "DST", "argument_type": "Subject", "tokens": ["sparc"], "offsets": [67]}, {"text": "generalization", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["generalization"], "offsets": [86]}, {"text": "unseen domains", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["unseen", "domains"], "offsets": [88, 89]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [85]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [105]}, {"text": "two state - of - the - art text - to - sql models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "state", "-", "of", "-", "the", "-", "art", "text", "-", "to", "-", "sql", "models"], "offsets": [108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121]}, {"text": "adapted", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["adapted"], "offsets": [122]}], "trigger": {"text": "experiment", "tokens": ["experiment"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "context - dependent , cross - domain setup", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["context", "-", "dependent", ",", "cross", "-", "domain", "setup"], "offsets": [125, 126, 127, 128, 129, 130, 131, 132]}], "trigger": {"text": "adapted", "tokens": ["adapted"], "offsets": [122]}}, {"event_type": "FAC", "arguments": [{"text": "best model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", 
"model"], "offsets": [135, 136]}, {"text": "over all questions", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "all", "questions"], "offsets": [147, 148, 149]}, {"text": "exact match accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["exact", "match", "accuracy"], "offsets": [139, 140, 141]}, {"text": "20 . 2 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["20", ".", "2", "%"], "offsets": [143, 144, 145, 146]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "best model", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["best", "model"], "offsets": [135, 136]}, {"text": "exact match accuracy", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["exact", "match", "accuracy"], "offsets": [139, 140, 141]}, {"text": "less than10 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["less", "than10", "%"], "offsets": [151, 152, 153]}, {"text": "over all interaction sequences", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["over", "all", "interaction", "sequences"], "offsets": [154, 155, 156, 157]}], "trigger": {"text": "obtains", "tokens": ["obtains"], "offsets": [137]}}, {"event_type": "FAC", "arguments": [{"text": "the cross - domain setting", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["the", "cross", "-", "domain", "setting"], "offsets": [161, 162, 163, 164, 165]}, {"text": "con - textual phenomena", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["con", "-", "textual", "phenomena"], "offsets": [168, 169, 170, 171]}, {"text": "dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["dataset"], "offsets": [174]}, {"text": "significant challenges", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["significant", "challenges"], "offsets": [176, 177]}, {"text": "future research", "nugget_type": "TAK", "argument_type": "Target", "tokens": 
["future", "research"], "offsets": [179, 180]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [175]}}], "document": ["we", "present", "sparc", ",", "a", "dataset", "for", "cross", "-", "domainsemanticparsing", "incontext", "that", "consists", "of", "4", ",", "298", "coherent", "question", "sequences", "(", "12k", "+", "individual", "questions", "annotated", "with", "sql", "queries", ")", ".", "it", "is", "obtained", "from", "controlled", "user", "interactions", "with", "200", "complex", "databases", "over", "138", "domains", ".", "we", "provide", "an", "in", "-", "depth", "analysis", "of", "sparc", "and", "show", "that", "it", "introduces", "new", "challenges", "compared", "to", "existing", "datasets", ".", "sparc", "demonstrates", "complex", "contextual", "dependencies", ",", "(", "2", ")", "has", "greater", "semantic", "diversity", ",", "and", "(", "3", ")", "requires", "generalization", "to", "unseen", "domains", "due", "to", "its", "cross", "-", "domain", "nature", "and", "the", "unseen", "databases", "at", "test", "time", ".", "we", "experiment", "with", "two", "state", "-", "of", "-", "the", "-", "art", "text", "-", "to", "-", "sql", "models", "adapted", "to", "the", "context", "-", "dependent", ",", "cross", "-", "domain", "setup", ".", "the", "best", "model", "obtains", "an", "exact", "match", "accuracy", "of", "20", ".", "2", "%", "over", "all", "questions", "and", "less", "than10", "%", "over", "all", "interaction", "sequences", ",", "indicating", "that", "the", "cross", "-", "domain", "setting", "and", "the", "con", "-", "textual", "phenomena", "of", "the", "dataset", "present", "significant", "challenges", "for", "future", "research", ".", "the", "dataset", ",", "baselines", ",", "and", "leaderboard", "are", "released", "at", "https", ":", "/", "/", "yale", "-", "lily", ".", "github", ".", "io", "/", "sparc", "."]}, {"venue": "ACL", "title": "Spelling Error Correction with Soft-Masked BERT", "abstract": "Spelling error correction is 
an important yet challenging task because a satisfactory solution of it essentially needs human-level language understanding ability. Without loss of generality we consider Chinese spelling error correction (CSC) in this paper. A state-of-the-art method for the task selects a character from a list of candidates for correction (including non-correction) at each position of the sentence on the basis of BERT, the language representation model. The accuracy of the method can be sub-optimal, however, because BERT does not have sufficient capability to detect whether there is an error at each position, apparently due to the way of pre-training it using mask language modeling. In this work, we propose a novel neural architecture to address the aforementioned issue, which consists of a network for error detection and a network for error correction based on BERT, with the former being connected to the latter with what we call soft-masking technique. Our method of using \u2018Soft-Masked BERT\u2019 is general, and it may be employed in other language detection-correction problems. 
Experimental results on two datasets, including one large dataset which we create and plan to release, demonstrate that the performance of our proposed method is significantly better than the baselines including the one solely based on BERT.", "doc_id": "e2db219ac90871ede3d825ce7c1119b1", "publication_year": 2020, "sentences": ["spelling error correction is an important yet challenging task because a satisfactory solution of it essentially needs human - level language understanding ability .", "without loss of generality we consider chinese spelling error correction ( csc ) in this paper .", "a state - of - the - art method for the task selects a character from a list of candidates for correction ( including non - correction ) at each position of the sentence on the basis of bert , the language representation model .", "the accuracy of the method can be sub - optimal , however , because bert does not have sufficient capability to detect whether there is an error at each position , apparently due to the way of pre - training it using mask language modeling .", "in this work , we propose a novel neural architecture to address the aforementioned issue , which consists of a network for error detection and a network for error correction based on bert , with the former being connected to the latter with what we call soft - masking technique .", "our method of using \u2018 soft - masked bert \u2019 is general , and it may be employed in other language detection - correction problems .", "experimental results on two datasets , including one large dataset which we create and plan to release , demonstrate that the performance of our proposed method is significantly better than the baselines including the one solely based on bert ."], "events": [{"event_type": "ITT", "arguments": [{"text": "spelling error correction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["spelling", "error", "correction"], "offsets": [0, 1, 2]}], "trigger": {"text": "task", "tokens": 
["task"], "offsets": [8]}}, {"event_type": "WKS", "arguments": [{"text": "without loss of generality", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "loss", "of", "generality"], "offsets": [24, 25, 26, 27]}, {"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [28]}, {"text": "chinese spelling error correction", "nugget_type": "APP", "argument_type": "Content", "tokens": ["chinese", "spelling", "error", "correction"], "offsets": [30, 31, 32, 33]}], "trigger": {"text": "consider", "tokens": ["consider"], "offsets": [29]}}, {"event_type": "RWS", "arguments": [{"text": "at each position of the sentence", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "each", "position", "of", "the", "sentence"], "offsets": [69, 70, 71, 72, 73, 74]}, {"text": "character", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["character"], "offsets": [55]}, {"text": "list of candidates for correction", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["list", "of", "candidates", "for", "correction"], "offsets": [58, 59, 60, 61, 62]}, {"text": "state - of - the - art method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["state", "-", "of", "-", "the", "-", "art", "method"], "offsets": [42, 43, 44, 45, 46, 47, 48, 49]}], "trigger": {"text": "selects", "tokens": ["selects"], "offsets": [53]}}, {"event_type": "RWF", "arguments": [{"text": "sub - optimal", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["sub", "-", "optimal"], "offsets": [93, 94, 95]}], "trigger": {"text": "sub - optimal", "tokens": ["sub", "-", "optimal"], "offsets": [93, 94, 95]}}, {"event_type": "RWF", "arguments": [{"text": "bert", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["bert"], "offsets": [100]}, {"text": "detect", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["detect"], "offsets": [107]}, {"text": "not have", "nugget_type": "WEA", 
"argument_type": "Fault", "tokens": ["not", "have"], "offsets": [102, 103]}], "trigger": {"text": "not have", "tokens": ["not", "have"], "offsets": [102, 103]}}, {"event_type": "PUR", "arguments": [{"text": "error", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["error"], "offsets": [112]}, {"text": "at each position", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["at", "each", "position"], "offsets": [113, 114, 115]}], "trigger": {"text": "detect", "tokens": ["detect"], "offsets": [107]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [136]}, {"text": "neural architecture", "nugget_type": "APP", "argument_type": "Content", "tokens": ["neural", "architecture"], "offsets": [140, 141]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [137]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [220]}, {"text": "large dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "dataset"], "offsets": [217, 218]}], "trigger": {"text": "create and plan to release", "tokens": ["create", "and", "plan", "to", "release"], "offsets": [221, 222, 223, 224, 225]}}, {"event_type": "FIN", "arguments": [{"text": "significantly better", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["significantly", "better"], "offsets": [236, 237]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [227]}}, {"event_type": "CMP", "arguments": [{"text": "baselines", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["baselines"], "offsets": [240]}, {"text": "significantly better", "nugget_type": "STR", "argument_type": "Result", "tokens": ["significantly", "better"], "offsets": [236, 237]}, {"text": "performance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["performance"], "offsets": [230]}], "trigger": {"text": 
"significantly better", "tokens": ["significantly", "better"], "offsets": [236, 237]}}, {"event_type": "MDS", "arguments": [{"text": "network for error detection", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["network", "for", "error", "detection"], "offsets": [152, 153, 154, 155]}, {"text": "network for error correction", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["network", "for", "error", "correction"], "offsets": [158, 159, 160, 161]}, {"text": "soft - masking technique", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["soft", "-", "masking", "technique"], "offsets": [178, 179, 180, 181]}], "trigger": {"text": "connected", "tokens": ["connected"], "offsets": [170]}}], "document": ["spelling", "error", "correction", "is", "an", "important", "yet", "challenging", "task", "because", "a", "satisfactory", "solution", "of", "it", "essentially", "needs", "human", "-", "level", "language", "understanding", "ability", ".", "without", "loss", "of", "generality", "we", "consider", "chinese", "spelling", "error", "correction", "(", "csc", ")", "in", "this", "paper", ".", "a", "state", "-", "of", "-", "the", "-", "art", "method", "for", "the", "task", "selects", "a", "character", "from", "a", "list", "of", "candidates", "for", "correction", "(", "including", "non", "-", "correction", ")", "at", "each", "position", "of", "the", "sentence", "on", "the", "basis", "of", "bert", ",", "the", "language", "representation", "model", ".", "the", "accuracy", "of", "the", "method", "can", "be", "sub", "-", "optimal", ",", "however", ",", "because", "bert", "does", "not", "have", "sufficient", "capability", "to", "detect", "whether", "there", "is", "an", "error", "at", "each", "position", ",", "apparently", "due", "to", "the", "way", "of", "pre", "-", "training", "it", "using", "mask", "language", "modeling", ".", "in", "this", "work", ",", "we", "propose", "a", "novel", "neural", "architecture", "to", "address", "the", 
"aforementioned", "issue", ",", "which", "consists", "of", "a", "network", "for", "error", "detection", "and", "a", "network", "for", "error", "correction", "based", "on", "bert", ",", "with", "the", "former", "being", "connected", "to", "the", "latter", "with", "what", "we", "call", "soft", "-", "masking", "technique", ".", "our", "method", "of", "using", "\u2018", "soft", "-", "masked", "bert", "\u2019", "is", "general", ",", "and", "it", "may", "be", "employed", "in", "other", "language", "detection", "-", "correction", "problems", ".", "experimental", "results", "on", "two", "datasets", ",", "including", "one", "large", "dataset", "which", "we", "create", "and", "plan", "to", "release", ",", "demonstrate", "that", "the", "performance", "of", "our", "proposed", "method", "is", "significantly", "better", "than", "the", "baselines", "including", "the", "one", "solely", "based", "on", "bert", "."]}, {"venue": "ACL", "title": "Mitigating Gender Bias in Natural Language Processing: Literature Review", "abstract": "As Natural Language Processing (NLP) and Machine Learning (ML) tools rise in popularity, it becomes increasingly vital to recognize the role they play in shaping societal biases and stereotypes. Although NLP models have shown success in modeling various applications, they propagate and may even amplify gender bias found in text corpora. While the study of bias in artificial intelligence is not new, methods to mitigate gender bias in NLP are relatively nascent. In this paper, we review contemporary studies on recognizing and mitigating gender bias in NLP. We discuss gender bias based on four forms of representation bias and analyze methods recognizing gender bias. Furthermore, we discuss the advantages and drawbacks of existing gender debiasing methods. 
Finally, we discuss future studies for recognizing and mitigating gender bias in NLP.", "doc_id": "20f9cd772c961f284d20942acd6cecdd", "publication_year": 2019, "sentences": ["as natural language processing ( nlp ) and machine learning ( ml ) tools rise in popularity , it becomes increasingly vital to recognize the role they play in shaping societal biases and stereotypes .", "although nlp models have shown success in modeling various applications , they propagate and may even amplify gender bias found in text corpora .", "while the study of bias in artificial intelligence is not new , methods to mitigate gender bias in nlp are relatively nascent .", "in this paper , we review contemporary studies on recognizing and mitigating gender bias in nlp .", "we discuss gender bias based on four forms of representation bias and analyze methods recognizing gender bias .", "furthermore , we discuss the advantages and drawbacks of existing gender debiasing methods .", "finally , we discuss future studies for recognizing and mitigating gender bias in nlp ."], "events": [{"event_type": "ITT", "arguments": [{"text": "natural language processing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["natural", "language", "processing"], "offsets": [1, 2, 3]}], "trigger": {"text": "becomes increasingly vital", "tokens": ["becomes", "increasingly", "vital"], "offsets": [19, 20, 21]}}, {"event_type": "RWF", "arguments": [{"text": "nlp models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["natural", "language", "processing", "models"], "offsets": [1, 2, 3, 37]}, {"text": "gender bias found in text corpora", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["gender", "bias", "found", "in", "text", "corpora"], "offsets": [52, 53, 54, 55, 56, 57]}], "trigger": {"text": "may even amplify", "tokens": ["may", "even", "amplify"], "offsets": [49, 50, 51]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", 
"tokens": ["we"], "offsets": [86]}, {"text": "contemporary studies on recognizing and mitigating gender bias in nlp", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["contemporary", "studies", "on", "recognizing", "and", "mitigating", "gender", "bias", "in", "natural", "language", "processing"], "offsets": [88, 89, 90, 91, 92, 93, 94, 95, 96, 1, 2, 3]}], "trigger": {"text": "review", "tokens": ["review"], "offsets": [87]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [99]}, {"text": "gender bias", "nugget_type": "WEA", "argument_type": "Content", "tokens": ["gender", "bias"], "offsets": [101, 102]}, {"text": "based on four forms of representation bias", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["based", "on", "four", "forms", "of", "representation", "bias"], "offsets": [103, 104, 105, 106, 107, 108, 109]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [100]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [99]}, {"text": "methods recognizing gender bias", "nugget_type": "APP", "argument_type": "Content", "tokens": ["methods", "recognizing", "gender", "bias"], "offsets": [112, 113, 114, 115]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [111]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [119]}, {"text": "advantages of existing gender debiasing methods", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["advantages", "of", "existing", "gender", "debiasing", "methods"], "offsets": [122, 125, 126, 127, 128, 129]}, {"text": "drawbacks of existing gender debiasing methods", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["drawbacks", "of", "existing", "gender", "debiasing", "methods"], "offsets": [124, 125, 126, 
127, 128, 129]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [120]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [133]}, {"text": "future studies", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["future", "studies"], "offsets": [135, 136]}, {"text": "recognizing and mitigating", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["recognizing", "and", "mitigating"], "offsets": [138, 139, 140]}], "trigger": {"text": "discuss", "tokens": ["discuss"], "offsets": [134]}}, {"event_type": "PUR", "arguments": [{"text": "gender bias in nlp", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["gender", "bias", "in", "natural", "language", "processing"], "offsets": [141, 142, 143, 1, 2, 3]}], "trigger": {"text": "recognizing and mitigating", "tokens": ["recognizing", "and", "mitigating"], "offsets": [138, 139, 140]}}], "document": ["as", "natural", "language", "processing", "(", "nlp", ")", "and", "machine", "learning", "(", "ml", ")", "tools", "rise", "in", "popularity", ",", "it", "becomes", "increasingly", "vital", "to", "recognize", "the", "role", "they", "play", "in", "shaping", "societal", "biases", "and", "stereotypes", ".", "although", "nlp", "models", "have", "shown", "success", "in", "modeling", "various", "applications", ",", "they", "propagate", "and", "may", "even", "amplify", "gender", "bias", "found", "in", "text", "corpora", ".", "while", "the", "study", "of", "bias", "in", "artificial", "intelligence", "is", "not", "new", ",", "methods", "to", "mitigate", "gender", "bias", "in", "nlp", "are", "relatively", "nascent", ".", "in", "this", "paper", ",", "we", "review", "contemporary", "studies", "on", "recognizing", "and", "mitigating", "gender", "bias", "in", "nlp", ".", "we", "discuss", "gender", "bias", "based", "on", "four", "forms", "of", "representation", "bias", "and", "analyze", "methods", "recognizing", "gender", 
"bias", ".", "furthermore", ",", "we", "discuss", "the", "advantages", "and", "drawbacks", "of", "existing", "gender", "debiasing", "methods", ".", "finally", ",", "we", "discuss", "future", "studies", "for", "recognizing", "and", "mitigating", "gender", "bias", "in", "nlp", "."]}, {"venue": "ACL", "title": "Monotonic Infinite Lookback Attention for Simultaneous Machine Translation", "abstract": "Simultaneous machine translation begins to translate each source sentence before the source speaker is finished speaking, with applications to live and streaming scenarios. Simultaneous systems must carefully schedule their reading of the source sentence to balance quality against latency. We present the first simultaneous translation system to learn an adaptive schedule jointly with a neural machine translation (NMT) model that attends over all source tokens read thus far. We do so by introducing Monotonic Infinite Lookback (MILk) attention, which maintains both a hard, monotonic attention head to schedule the reading of the source sentence, and a soft attention head that extends from the monotonic head back to the beginning of the source. 
We show that MILk\u2019s adaptive schedule allows it to arrive at latency-quality trade-offs that are favorable to those of a recently proposed wait-k strategy for many latency values.", "doc_id": "45c471012af24cdf1d900ce70b1b2bfd", "publication_year": 2019, "sentences": ["simultaneous machine translation begins to translate each source sentence before the source speaker is finished speaking , with applications to live and streaming scenarios .", "simultaneous systems must carefully schedule their reading of the source sentence to balance quality against latency .", "we present the first simultaneous translation system to learn an adaptive schedule jointly with a neural machine translation ( nmt ) model that attends over all source tokens read thus far .", "we do so by introducing monotonic infinite lookback ( milk ) attention , which maintains both a hard , monotonic attention head to schedule the reading of the source sentence , and a soft attention head that extends from the monotonic head back to the beginning of the source .", "we show that milk \u2019 s adaptive schedule allows it to arrive at latency - quality trade - offs that are favorable to those of a recently proposed wait - k strategy for many latency values ."], "events": [{"event_type": "ITT", "arguments": [{"text": "simultaneous machine translation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["simultaneous", "machine", "translation"], "offsets": [0, 1, 2]}], "trigger": {"text": "begins", "tokens": ["begins"], "offsets": [3]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [42]}, {"text": "simultaneous translation system", "nugget_type": "APP", "argument_type": "Content", "tokens": ["simultaneous", "translation", "system"], "offsets": [46, 47, 48]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [50]}], "trigger": {"text": "present", "tokens": 
["present"], "offsets": [43]}}, {"event_type": "PUR", "arguments": [{"text": "adaptive schedule jointly", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["adaptive", "schedule", "jointly"], "offsets": [52, 53, 54]}], "trigger": {"text": "learn", "tokens": ["learn"], "offsets": [50]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [74]}, {"text": "monotonic infinite lookback ( milk ) attention", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["monotonic", "infinite", "lookback", "attention"], "offsets": [79, 80, 81, 85]}, {"text": "learn", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["learn"], "offsets": [50]}], "trigger": {"text": "introducing", "tokens": ["introducing"], "offsets": [78]}}, {"event_type": "MDS", "arguments": [{"text": "schedule", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["schedule"], "offsets": [97]}, {"text": "hard , monotonic attention head", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["hard", ",", "monotonic", "attention", "head"], "offsets": [91, 92, 93, 94, 95]}, {"text": "soft attention head", "nugget_type": "MOD", "argument_type": "TriedComponent", "tokens": ["soft", "attention", "head"], "offsets": [107, 108, 109]}], "trigger": {"text": "maintains", "tokens": ["maintains"], "offsets": [88]}}, {"event_type": "PUR", "arguments": [{"text": "reading of the source sentence", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["reading", "of", "the", "source", "sentence"], "offsets": [99, 100, 101, 102, 103]}], "trigger": {"text": "schedule", "tokens": ["schedule"], "offsets": [97]}}, {"event_type": "FAC", "arguments": [{"text": "monotonic infinite lookback", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["monotonic", "infinite", "lookback"], "offsets": [79, 80, 81]}, {"text": "latency - quality trade - offs", "nugget_type": "FEA", "argument_type": "Object", 
"tokens": ["latency", "-", "quality", "trade", "-", "offs"], "offsets": [137, 138, 139, 140, 141, 142]}], "trigger": {"text": "arrive", "tokens": ["arrive"], "offsets": [135]}}], "document": ["simultaneous", "machine", "translation", "begins", "to", "translate", "each", "source", "sentence", "before", "the", "source", "speaker", "is", "finished", "speaking", ",", "with", "applications", "to", "live", "and", "streaming", "scenarios", ".", "simultaneous", "systems", "must", "carefully", "schedule", "their", "reading", "of", "the", "source", "sentence", "to", "balance", "quality", "against", "latency", ".", "we", "present", "the", "first", "simultaneous", "translation", "system", "to", "learn", "an", "adaptive", "schedule", "jointly", "with", "a", "neural", "machine", "translation", "(", "nmt", ")", "model", "that", "attends", "over", "all", "source", "tokens", "read", "thus", "far", ".", "we", "do", "so", "by", "introducing", "monotonic", "infinite", "lookback", "(", "milk", ")", "attention", ",", "which", "maintains", "both", "a", "hard", ",", "monotonic", "attention", "head", "to", "schedule", "the", "reading", "of", "the", "source", "sentence", ",", "and", "a", "soft", "attention", "head", "that", "extends", "from", "the", "monotonic", "head", "back", "to", "the", "beginning", "of", "the", "source", ".", "we", "show", "that", "milk", "\u2019", "s", "adaptive", "schedule", "allows", "it", "to", "arrive", "at", "latency", "-", "quality", "trade", "-", "offs", "that", "are", "favorable", "to", "those", "of", "a", "recently", "proposed", "wait", "-", "k", "strategy", "for", "many", "latency", "values", "."]}, {"venue": "ACL", "title": "Match the Script, Adapt if Multilingual: Analyzing the Effect of Multilingual Pretraining on Cross-lingual Transferability", "abstract": "Pretrained multilingual models enable zero-shot learning even for unseen languages, and that performance can be further improved via adaptation prior to finetuning. 
However, it is unclear how the number of pretraining languages influences a model\u2019s zero-shot learning for languages unseen during pretraining. To fill this gap, we ask the following research questions: (1) How does the number of pretraining languages influence zero-shot performance on unseen target languages? (2) Does the answer to that question change with model adaptation? (3) Do the findings for our first question change if the languages used for pretraining are all related? Our experiments on pretraining with related languages indicate that choosing a diverse set of languages is crucial. Without model adaptation, surprisingly, increasing the number of pretraining languages yields better results up to adding related languages, after which performance plateaus.In contrast, with model adaptation via continued pretraining, pretraining on a larger number of languages often gives further improvement, suggesting that model adaptation is crucial to exploit additional pretraining languages.", "doc_id": "31104fe67b38b60fe19ff9da54f19921", "publication_year": 2022, "sentences": ["pretrained multilingual models enable zero - shot learning even for unseen languages , and that performance can be further improved via adaptation prior to finetuning .", "however , it is unclear how the number of pretraining languages influences a model \u2019 s zero - shot learning for languages unseen during pretraining .", "to fill this gap , we ask the following research questions :", "( 1 ) how does the number of pretraining languages influence zero - shot performance on unseen target languages ?", "( 2 ) does the answer to that question change with model adaptation ?", "( 3 ) do the findings for our first question change if the languages used for pretraining are all related ?", "our experiments on pretraining with related languages indicate that choosing a diverse set of languages is crucial .", "without model adaptation , surprisingly , increasing the number of pretraining languages 
yields better results up to adding related languages , after which performance plateaus .", "in contrast , with model adaptation via continued pretraining , pretraining on a larger number of languages often gives further improvement , suggesting that model adaptation is crucial to exploit additional pretraining languages ."], "events": [{"event_type": "ITT", "arguments": [{"text": "zero - shot learning", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["zero", "-", "shot", "learning"], "offsets": [4, 5, 6, 7]}], "trigger": {"text": "enable", "tokens": ["enable"], "offsets": [3]}}, {"event_type": "FIN", "arguments": [{"text": "crucial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["crucial"], "offsets": [135]}], "trigger": {"text": "indicate", "tokens": ["indicate"], "offsets": [126]}}, {"event_type": "FAC", "arguments": [{"text": "diverse set of languages", "nugget_type": "FEA", "argument_type": "Subject", "tokens": ["diverse", "set", "of", "languages"], "offsets": [130, 131, 132, 133]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [135]}}, {"event_type": "FAC", "arguments": [{"text": "increasing the number of pretraining languages", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["increasing", "the", "number", "of", "pretraining", "languages"], "offsets": [143, 144, 145, 146, 147, 148]}, {"text": "better results", "nugget_type": "STR", "argument_type": "Object", "tokens": ["better", "results"], "offsets": [150, 151]}, {"text": "without model adaptation", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["without", "model", "adaptation"], "offsets": [137, 138, 139]}], "trigger": {"text": "yields", "tokens": ["yields"], "offsets": [149]}}, {"event_type": "FAC", "arguments": [{"text": "with model adaptation via continued pretraining", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "model", "adaptation", "via", "continued", "pretraining"], "offsets": [166, 167, 168, 
169, 170, 171]}, {"text": "pretraining on a larger number of languages", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["pretraining", "on", "a", "larger", "number", "of", "languages"], "offsets": [173, 174, 175, 176, 177, 178, 179]}, {"text": "further improvement", "nugget_type": "STR", "argument_type": "Object", "tokens": ["further", "improvement"], "offsets": [182, 183]}], "trigger": {"text": "gives", "tokens": ["gives"], "offsets": [181]}}, {"event_type": "FIN", "arguments": [{"text": "crucial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["crucial"], "offsets": [190]}], "trigger": {"text": "suggesting", "tokens": ["suggesting"], "offsets": [185]}}, {"event_type": "FAC", "arguments": [{"text": "model adaptation", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["model", "adaptation"], "offsets": [187, 188]}, {"text": "exploit", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["exploit"], "offsets": [192]}], "trigger": {"text": "crucial", "tokens": ["crucial"], "offsets": [190]}}, {"event_type": "PUR", "arguments": [{"text": "additional pretraining languages", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["additional", "pretraining", "languages"], "offsets": [193, 194, 195]}], "trigger": {"text": "exploit", "tokens": ["exploit"], "offsets": [192]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "number of pretraining languages influence zero - shot performance on unseen target languages", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["number", "of", "pretraining", "languages", "influence", "zero", "-", "shot", "performance", "on", "unseen", "target", "languages"], "offsets": [70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}, {"text": "answer to that question change with model adaptation", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["answer", "to", "that", 
"question", "change", "with", "model", "adaptation"], "offsets": [89, 90, 91, 92, 93, 94, 95, 96]}, {"text": "findings for our first question change if the languages used for pretraining are all related", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["findings", "for", "our", "first", "question", "change", "if", "the", "languages", "used", "for", "pretraining", "are", "all", "related"], "offsets": [103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117]}], "trigger": {"text": "ask", "tokens": ["ask"], "offsets": [58]}}, {"event_type": "FAC", "arguments": [{"text": "performance", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["performance"], "offsets": [160]}, {"text": "adding related languages", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["adding", "related", "languages"], "offsets": [154, 155, 156]}], "trigger": {"text": "plateaus", "tokens": ["plateaus"], "offsets": [161]}}], "document": ["pretrained", "multilingual", "models", "enable", "zero", "-", "shot", "learning", "even", "for", "unseen", "languages", ",", "and", "that", "performance", "can", "be", "further", "improved", "via", "adaptation", "prior", "to", "finetuning", ".", "however", ",", "it", "is", "unclear", "how", "the", "number", "of", "pretraining", "languages", "influences", "a", "model", "\u2019", "s", "zero", "-", "shot", "learning", "for", "languages", "unseen", "during", "pretraining", ".", "to", "fill", "this", "gap", ",", "we", "ask", "the", "following", "research", "questions", ":", "(", "1", ")", "how", "does", "the", "number", "of", "pretraining", "languages", "influence", "zero", "-", "shot", "performance", "on", "unseen", "target", "languages", "?", "(", "2", ")", "does", "the", "answer", "to", "that", "question", "change", "with", "model", "adaptation", "?", "(", "3", ")", "do", "the", "findings", "for", "our", "first", "question", "change", "if", "the", "languages", "used", "for", "pretraining", "are", "all", "related", 
"?", "our", "experiments", "on", "pretraining", "with", "related", "languages", "indicate", "that", "choosing", "a", "diverse", "set", "of", "languages", "is", "crucial", ".", "without", "model", "adaptation", ",", "surprisingly", ",", "increasing", "the", "number", "of", "pretraining", "languages", "yields", "better", "results", "up", "to", "adding", "related", "languages", ",", "after", "which", "performance", "plateaus", ".", "in", "contrast", ",", "with", "model", "adaptation", "via", "continued", "pretraining", ",", "pretraining", "on", "a", "larger", "number", "of", "languages", "often", "gives", "further", "improvement", ",", "suggesting", "that", "model", "adaptation", "is", "crucial", "to", "exploit", "additional", "pretraining", "languages", "."]}, {"venue": "ACL", "title": "Connecting Embeddings for Knowledge Graph Entity Typing", "abstract": "Knowledge graph (KG) entity typing aims at inferring possible missing entity type instances in KG, which is a very significant but still under-explored subtask of knowledge graph completion. In this paper, we propose a novel approach for KG entity typing which is trained by jointly utilizing local typing knowledge from existing entity type assertions and global triple knowledge in KGs. Specifically, we present two distinct knowledge-driven effective mechanisms of entity type inference. Accordingly, we build two novel embedding models to realize the mechanisms. Afterward, a joint model via connecting them is used to infer missing entity type instances, which favors inferences that agree with both entity type instances and triple knowledge in KGs. Experimental results on two real-world datasets (Freebase and YAGO) demonstrate the effectiveness of our proposed mechanisms and models for improving KG entity typing. 
The source code and data of this paper can be obtained from: https://github.com/Adam1679/ConnectE .", "doc_id": "045401a650c51981b81f4d2ed90901cd", "publication_year": 2020, "sentences": ["knowledge graph ( kg ) entity typing aims at inferring possible missing entity type instances in kg , which is a very significant but still under - explored subtask of knowledge graph completion .", "in this paper , we propose a novel approach for kg entity typing which is trained by jointly utilizing local typing knowledge from existing entity type assertions and global triple knowledge in kgs .", "specifically , we present two distinct knowledge - driven effective mechanisms of entity type inference .", "accordingly , we build two novel embedding models to realize the mechanisms .", "afterward , a joint model via connecting them is used to infer missing entity type instances , which favors inferences that agree with both entity type instances and triple knowledge in kgs .", "experimental results on two real - world datasets ( freebase and yago ) demonstrate the effectiveness of our proposed mechanisms and models for improving kg entity typing .", "the source code and data of this paper can be obtained from : https : / / github . 
com / adam1679 / connecte ."], "events": [{"event_type": "ITT", "arguments": [{"text": "knowledge graph ( kg ) entity typing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["knowledge", "graph", "entity", "typing"], "offsets": [0, 1, 5, 6]}], "trigger": {"text": "aims", "tokens": ["aims"], "offsets": [7]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [38]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [42]}, {"text": "knowledge graph ( kg ) entity typing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["knowledge", "graph", "entity", "typing"], "offsets": [0, 1, 5, 6]}, {"text": "kg entity typing", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["kg", "entity", "typing"], "offsets": [44, 45, 46]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [39]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [38]}, {"text": "local typing knowledge from existing entity type assertions", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["local", "typing", "knowledge", "from", "existing", "entity", "type", "assertions"], "offsets": [53, 54, 55, 56, 57, 58, 59, 60]}, {"text": "global triple knowledge in kgs", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["global", "triple", "knowledge", "in", "knowledge", "graph"], "offsets": [62, 63, 64, 65, 0, 1]}], "trigger": {"text": "jointly utilizing", "tokens": ["jointly", "utilizing"], "offsets": [51, 52]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [70]}, {"text": "two distinct knowledge - driven effective mechanisms of entity type inference", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "distinct", "knowledge", "-", 
"driven", "effective", "mechanisms", "of", "entity", "type", "inference"], "offsets": [72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [71]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [86]}, {"text": "two novel embedding models", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "novel", "embedding", "models"], "offsets": [88, 89, 90, 91]}, {"text": "realize", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["realize"], "offsets": [93]}], "trigger": {"text": "build", "tokens": ["build"], "offsets": [87]}}, {"event_type": "PUR", "arguments": [{"text": "two distinct knowledge - driven effective mechanisms", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["two", "distinct", "knowledge", "-", "driven", "effective", "mechanisms"], "offsets": [72, 73, 74, 75, 76, 77, 78]}], "trigger": {"text": "realize", "tokens": ["realize"], "offsets": [93]}}, {"event_type": "WKS", "arguments": [{"text": "joint model", "nugget_type": "APP", "argument_type": "Content", "tokens": ["joint", "model"], "offsets": [100, 101]}, {"text": "infer", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["infer"], "offsets": [108]}], "trigger": {"text": "used", "tokens": ["used"], "offsets": [106]}}, {"event_type": "PUR", "arguments": [{"text": "missing entity type instances", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["missing", "entity", "type", "instances"], "offsets": [109, 110, 111, 112]}], "trigger": {"text": "infer", "tokens": ["infer"], "offsets": [108]}}, {"event_type": "FAC", "arguments": [{"text": "two real - world datasets", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["two", "real", "-", "world", "datasets"], "offsets": [133, 134, 135, 136, 137]}, {"text": "effectiveness", "nugget_type": "STR", "argument_type": "Subject", "tokens": ["effectiveness"], 
"offsets": [145]}, {"text": "improving", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improving"], "offsets": [153]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [143]}}, {"event_type": "PUR", "arguments": [{"text": "knowledge graph ( kg ) entity typing", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["knowledge", "graph", "entity", "typing"], "offsets": [0, 1, 5, 6]}], "trigger": {"text": "improving", "tokens": ["improving"], "offsets": [153]}}], "document": ["knowledge", "graph", "(", "kg", ")", "entity", "typing", "aims", "at", "inferring", "possible", "missing", "entity", "type", "instances", "in", "kg", ",", "which", "is", "a", "very", "significant", "but", "still", "under", "-", "explored", "subtask", "of", "knowledge", "graph", "completion", ".", "in", "this", "paper", ",", "we", "propose", "a", "novel", "approach", "for", "kg", "entity", "typing", "which", "is", "trained", "by", "jointly", "utilizing", "local", "typing", "knowledge", "from", "existing", "entity", "type", "assertions", "and", "global", "triple", "knowledge", "in", "kgs", ".", "specifically", ",", "we", "present", "two", "distinct", "knowledge", "-", "driven", "effective", "mechanisms", "of", "entity", "type", "inference", ".", "accordingly", ",", "we", "build", "two", "novel", "embedding", "models", "to", "realize", "the", "mechanisms", ".", "afterward", ",", "a", "joint", "model", "via", "connecting", "them", "is", "used", "to", "infer", "missing", "entity", "type", "instances", ",", "which", "favors", "inferences", "that", "agree", "with", "both", "entity", "type", "instances", "and", "triple", "knowledge", "in", "kgs", ".", "experimental", "results", "on", "two", "real", "-", "world", "datasets", "(", "freebase", "and", "yago", ")", "demonstrate", "the", "effectiveness", "of", "our", "proposed", "mechanisms", "and", "models", "for", "improving", "kg", "entity", "typing", ".", "the", "source", "code", "and", "data", "of", "this", 
"paper", "can", "be", "obtained", "from", ":", "https", ":", "/", "/", "github", ".", "com", "/", "adam1679", "/", "connecte", "."]}, {"venue": "ACL", "title": "CLEVE: Contrastive Pre-training for Event Extraction", "abstract": "Event extraction (EE) has considerably benefited from pre-trained language models (PLMs) by fine-tuning. However, existing pre-training methods have not involved modeling event characteristics, resulting in the developed EE models cannot take full advantage of large-scale unsupervised data. To this end, we propose CLEVE, a contrastive pre-training framework for EE to better learn event knowledge from large unsupervised data and their semantic structures (e.g. AMR) obtained with automatic parsers. CLEVE contains a text encoder to learn event semantics and a graph encoder to learn event structures respectively. Specifically, the text encoder learns event semantic representations by self-supervised contrastive learning to represent the words of the same events closer than those unrelated words; the graph encoder learns event structure representations by graph contrastive pre-training on parsed event-related semantic structures. The two complementary representations then work together to improve both the conventional supervised EE and the unsupervised \u201cliberal\u201d EE, which requires jointly extracting events and discovering event schemata without any annotated data. Experiments on ACE 2005 and MAVEN datasets show that CLEVE achieves significant improvements, especially in the challenging unsupervised setting. 
The source code and pre-trained checkpoints can be obtained from https://github.com/THU-KEG/CLEVE.", "doc_id": "e791d4e3fa83f54164af634d4d8d0932", "publication_year": 2021, "sentences": ["event extraction ( ee ) has considerably benefited from pre - trained language models ( plms ) by fine - tuning .", "however , existing pre - training methods have not involved modeling event characteristics , resulting in the developed ee models cannot take full advantage of large - scale unsupervised data .", "to this end , we propose cleve , a contrastive pre - training framework for ee to better learn event knowledge from large unsupervised data and their semantic structures ( e . g . amr ) obtained with automatic parsers .", "cleve contains a text encoder to learn event semantics and a graph encoder to learn event structures respectively .", "specifically , the text encoder learns event semantic representations by self - supervised contrastive learning to represent the words of the same events closer than those unrelated words ; the graph encoder learns event structure representations by graph contrastive pre - training on parsed event - related semantic structures .", "the two complementary representations then work together to improve both the conventional supervised ee and the unsupervised \u201c liberal \u201d ee , which requires jointly extracting events and discovering event schemata without any annotated data .", "experiments on ace 2005 and maven datasets show that cleve achieves significant improvements , especially in the challenging unsupervised setting .", "the source code and pre - trained checkpoints can be obtained from https : / / github . 
com / thu - keg / cleve ."], "events": [{"event_type": "ITT", "arguments": [{"text": "event extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["event", "extraction"], "offsets": [0, 1]}], "trigger": {"text": "benefited", "tokens": ["benefited"], "offsets": [7]}}, {"event_type": "RWF", "arguments": [{"text": "existing pre - training methods", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["existing", "pre", "-", "training", "methods"], "offsets": [24, 25, 26, 27, 28]}, {"text": "not involved", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["not", "involved"], "offsets": [30, 31]}], "trigger": {"text": "not involved", "tokens": ["not", "involved"], "offsets": [30, 31]}}, {"event_type": "RWF", "arguments": [{"text": "developed ee models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["developed", "event", "extraction", "models"], "offsets": [39, 0, 1, 41]}, {"text": "cannot take full advantage", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["cannot", "take", "full", "advantage"], "offsets": [42, 43, 44, 45]}], "trigger": {"text": "cannot take full advantage", "tokens": ["cannot", "take", "full", "advantage"], "offsets": [42, 43, 44, 45]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [57]}, {"text": "event extraction", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["event", "extraction"], "offsets": [0, 1]}, {"text": "contrastive pre - training framework", "nugget_type": "APP", "argument_type": "Content", "tokens": ["contrastive", "pre", "-", "training", "framework"], "offsets": [62, 63, 64, 65, 66]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [58]}}, {"event_type": "MDS", "arguments": [{"text": "event semantic representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["event", "semantic", "representations"], "offsets": [119, 120, 121]}, {"text": 
"represent", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["represent"], "offsets": [129]}, {"text": "text encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["text", "encoder"], "offsets": [116, 117]}, {"text": "self - supervised contrastive learning", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["self", "-", "supervised", "contrastive", "learning"], "offsets": [123, 124, 125, 126, 127]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [118]}}, {"event_type": "PUR", "arguments": [{"text": "words of the same events", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["words", "of", "the", "same", "events"], "offsets": [131, 132, 133, 134, 135]}], "trigger": {"text": "represent", "tokens": ["represent"], "offsets": [129]}}, {"event_type": "MDS", "arguments": [{"text": "graph encoder", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["graph", "encoder"], "offsets": [143, 144]}, {"text": "event structure representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["event", "structure", "representations"], "offsets": [146, 147, 148]}, {"text": "pre - training", "nugget_type": "E-MDS", "argument_type": "Condition", "tokens": ["pre", "-", "training"], "offsets": [152, 153, 154]}], "trigger": {"text": "learns", "tokens": ["learns"], "offsets": [145]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [209]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [206]}}, {"event_type": "FAC", "arguments": [{"text": "cleve", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["cleve"], "offsets": [208]}, {"text": "significant improvements", "nugget_type": "STR", "argument_type": "Object", "tokens": ["significant", "improvements"], "offsets": [210, 211]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [209]}}, 
{"event_type": "MDS", "arguments": [{"text": "graph contrastive", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["graph", "contrastive"], "offsets": [150, 151]}, {"text": "parsed event - related semantic structures", "nugget_type": "MOD", "argument_type": "BaseComponent", "tokens": ["parsed", "event", "-", "related", "semantic", "structures"], "offsets": [156, 157, 158, 159, 160, 161]}], "trigger": {"text": "pre - training", "tokens": ["pre", "-", "training"], "offsets": [152, 153, 154]}}, {"event_type": "MDS", "arguments": [{"text": "two complementary representations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["two", "complementary", "representations"], "offsets": [164, 165, 166]}, {"text": "improve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["improve"], "offsets": [171]}], "trigger": {"text": "work together", "tokens": ["work", "together"], "offsets": [168, 169]}}, {"event_type": "PUR", "arguments": [{"text": "conventional supervised ee", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["conventional", "supervised", "event", "extraction"], "offsets": [174, 175, 0, 1]}, {"text": "unsupervised \u201c liberal \u201d ee", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["unsupervised", "\u201c", "liberal", "\u201d", "event", "extraction"], "offsets": [179, 180, 181, 182, 0, 1]}], "trigger": {"text": "improve", "tokens": ["improve"], "offsets": [171]}}], "document": ["event", "extraction", "(", "ee", ")", "has", "considerably", "benefited", "from", "pre", "-", "trained", "language", "models", "(", "plms", ")", "by", "fine", "-", "tuning", ".", "however", ",", "existing", "pre", "-", "training", "methods", "have", "not", "involved", "modeling", "event", "characteristics", ",", "resulting", "in", "the", "developed", "ee", "models", "cannot", "take", "full", "advantage", "of", "large", "-", "scale", "unsupervised", "data", ".", "to", "this", "end", ",", "we", "propose", "cleve", ",", "a", 
"contrastive", "pre", "-", "training", "framework", "for", "ee", "to", "better", "learn", "event", "knowledge", "from", "large", "unsupervised", "data", "and", "their", "semantic", "structures", "(", "e", ".", "g", ".", "amr", ")", "obtained", "with", "automatic", "parsers", ".", "cleve", "contains", "a", "text", "encoder", "to", "learn", "event", "semantics", "and", "a", "graph", "encoder", "to", "learn", "event", "structures", "respectively", ".", "specifically", ",", "the", "text", "encoder", "learns", "event", "semantic", "representations", "by", "self", "-", "supervised", "contrastive", "learning", "to", "represent", "the", "words", "of", "the", "same", "events", "closer", "than", "those", "unrelated", "words", ";", "the", "graph", "encoder", "learns", "event", "structure", "representations", "by", "graph", "contrastive", "pre", "-", "training", "on", "parsed", "event", "-", "related", "semantic", "structures", ".", "the", "two", "complementary", "representations", "then", "work", "together", "to", "improve", "both", "the", "conventional", "supervised", "ee", "and", "the", "unsupervised", "\u201c", "liberal", "\u201d", "ee", ",", "which", "requires", "jointly", "extracting", "events", "and", "discovering", "event", "schemata", "without", "any", "annotated", "data", ".", "experiments", "on", "ace", "2005", "and", "maven", "datasets", "show", "that", "cleve", "achieves", "significant", "improvements", ",", "especially", "in", "the", "challenging", "unsupervised", "setting", ".", "the", "source", "code", "and", "pre", "-", "trained", "checkpoints", "can", "be", "obtained", "from", "https", ":", "/", "/", "github", ".", "com", "/", "thu", "-", "keg", "/", "cleve", "."]}, {"venue": "ACL", "title": "Boosting Entity Linking Performance by Leveraging Unlabeled Documents", "abstract": "Modern entity linking systems rely on large collections of documents specifically annotated for the task (e.g., AIDA CoNLL). 
In contrast, we propose an approach which exploits only naturally occurring information: unlabeled documents and Wikipedia. Our approach consists of two stages. First, we construct a high recall list of candidate entities for each mention in an unlabeled document. Second, we use the candidate lists as weak supervision to constrain our document-level entity linking model. The model treats entities as latent variables and, when estimated on a collection of unlabelled texts, learns to choose entities relying both on local context of each mention and on coherence with other entities in the document. The resulting approach rivals fully-supervised state-of-the-art systems on standard test sets. It also approaches their performance in the very challenging setting: when tested on a test set sampled from the data used to estimate the supervised systems. By comparing to Wikipedia-only training of our model, we demonstrate that modeling unlabeled documents is beneficial.", "doc_id": "400017ce27ee59bff4a9b332f06b05d6", "publication_year": 2019, "sentences": ["modern entity linking systems rely on large collections of documents specifically annotated for the task ( e . g . 
, aida conll ) .", "in contrast , we propose an approach which exploits only naturally occurring information : unlabeled documents and wikipedia .", "our approach consists of two stages .", "first , we construct a high recall list of candidate entities for each mention in an unlabeled document .", "second , we use the candidate lists as weak supervision to constrain our document - level entity linking model .", "the model treats entities as latent variables and , when estimated on a collection of unlabelled texts , learns to choose entities relying both on local context of each mention and on coherence with other entities in the document .", "the resulting approach rivals fully - supervised state - of - the - art systems on standard test sets .", "it also approaches their performance in the very challenging setting : when tested on a test set sampled from the data used to estimate the supervised systems .", "by comparing to wikipedia - only training of our model , we demonstrate that modeling unlabeled documents is beneficial ."], "events": [{"event_type": "ITT", "arguments": [{"text": "modern entity linking systems", "nugget_type": "APP", "argument_type": "Target", "tokens": ["modern", "entity", "linking", "systems"], "offsets": [0, 1, 2, 3]}], "trigger": {"text": "rely", "tokens": ["rely"], "offsets": [4]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [28]}, {"text": "approach", "nugget_type": "APP", "argument_type": "Content", "tokens": ["approach"], "offsets": [31]}, {"text": "exploits", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["exploits"], "offsets": [33]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [29]}}, {"event_type": "PUR", "arguments": [{"text": "only naturally occurring information", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["only", "unlabeled", "documents", "and", "wikipedia"], "offsets": [34, 39, 40, 41, 
42]}], "trigger": {"text": "exploits", "tokens": ["exploits"], "offsets": [33]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [53]}, {"text": "high recall list of candidate entities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["high", "recall", "list", "of", "candidate", "entities"], "offsets": [56, 57, 58, 59, 60, 61]}, {"text": "each mention", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["each", "mention"], "offsets": [63, 64]}, {"text": "in an unlabeled document", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "an", "unlabeled", "document"], "offsets": [65, 66, 67, 68]}], "trigger": {"text": "construct", "tokens": ["construct"], "offsets": [54]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [72]}, {"text": "candidate lists", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["candidate", "lists"], "offsets": [75, 76]}, {"text": "weak supervision", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["weak", "supervision"], "offsets": [78, 79]}, {"text": "constrain", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["constrain"], "offsets": [81]}], "trigger": {"text": "use", "tokens": ["use"], "offsets": [73]}}, {"event_type": "PUR", "arguments": [{"text": "document - level entity linking model", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["document", "-", "level", "entity", "linking", "model"], "offsets": [83, 84, 85, 86, 87, 88]}], "trigger": {"text": "constrain", "tokens": ["constrain"], "offsets": [81]}}, {"event_type": "WKS", "arguments": [{"text": "entities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["entities"], "offsets": [93]}, {"text": "latent variables", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["latent", "variables"], "offsets": [95, 96]}], 
"trigger": {"text": "treats", "tokens": ["treats"], "offsets": [92]}}, {"event_type": "WKS", "arguments": [{"text": "when estimated on a collection of unlabelled texts", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "estimated", "on", "a", "collection", "of", "unlabelled", "texts"], "offsets": [99, 100, 101, 102, 103, 104, 105, 106]}, {"text": "entities", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["entities"], "offsets": [111]}, {"text": "relying both on local context of each mention and on coherence with other entities in the document", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["relying", "both", "on", "local", "context", "of", "each", "mention", "and", "on", "coherence", "with", "other", "entities", "in", "the", "document"], "offsets": [112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128]}], "trigger": {"text": "choose", "tokens": ["choose"], "offsets": [110]}}, {"event_type": "CMP", "arguments": [{"text": "resulting approach", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["resulting", "approach"], "offsets": [131, 132]}, {"text": "fully - supervised state - of - the - art systems", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["fully", "-", "supervised", "state", "-", "of", "-", "the", "-", "art", "systems"], "offsets": [134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144]}, {"text": "on standard test sets", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "standard", "test", "sets"], "offsets": [145, 146, 147, 148]}], "trigger": {"text": "rivals", "tokens": ["rivals"], "offsets": [133]}}, {"event_type": "FAC", "arguments": [{"text": "their performance", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["resulting", "approach", "performance"], "offsets": [131, 132, 154]}, {"text": "in the very challenging setting", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "very", "challenging", 
"setting"], "offsets": [155, 156, 157, 158, 159]}, {"text": "resulting approach", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["resulting", "approach"], "offsets": [131, 132]}], "trigger": {"text": "approaches", "tokens": ["approaches"], "offsets": [152]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [189]}, {"text": "beneficial", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["beneficial"], "offsets": [196]}], "trigger": {"text": "demonstrate", "tokens": ["demonstrate"], "offsets": [190]}}, {"event_type": "FAC", "arguments": [{"text": "modeling unlabeled documents", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["modeling", "unlabeled", "documents"], "offsets": [192, 193, 194]}], "trigger": {"text": "beneficial", "tokens": ["beneficial"], "offsets": [196]}}], "document": ["modern", "entity", "linking", "systems", "rely", "on", "large", "collections", "of", "documents", "specifically", "annotated", "for", "the", "task", "(", "e", ".", "g", ".", ",", "aida", "conll", ")", ".", "in", "contrast", ",", "we", "propose", "an", "approach", "which", "exploits", "only", "naturally", "occurring", "information", ":", "unlabeled", "documents", "and", "wikipedia", ".", "our", "approach", "consists", "of", "two", "stages", ".", "first", ",", "we", "construct", "a", "high", "recall", "list", "of", "candidate", "entities", "for", "each", "mention", "in", "an", "unlabeled", "document", ".", "second", ",", "we", "use", "the", "candidate", "lists", "as", "weak", "supervision", "to", "constrain", "our", "document", "-", "level", "entity", "linking", "model", ".", "the", "model", "treats", "entities", "as", "latent", "variables", "and", ",", "when", "estimated", "on", "a", "collection", "of", "unlabelled", "texts", ",", "learns", "to", "choose", "entities", "relying", "both", "on", "local", "context", "of", "each", "mention", "and", "on", "coherence", 
"with", "other", "entities", "in", "the", "document", ".", "the", "resulting", "approach", "rivals", "fully", "-", "supervised", "state", "-", "of", "-", "the", "-", "art", "systems", "on", "standard", "test", "sets", ".", "it", "also", "approaches", "their", "performance", "in", "the", "very", "challenging", "setting", ":", "when", "tested", "on", "a", "test", "set", "sampled", "from", "the", "data", "used", "to", "estimate", "the", "supervised", "systems", ".", "by", "comparing", "to", "wikipedia", "-", "only", "training", "of", "our", "model", ",", "we", "demonstrate", "that", "modeling", "unlabeled", "documents", "is", "beneficial", "."]}, {"venue": "ACL", "title": "Flooding-X: Improving BERT\u2019s Resistance to Adversarial Attacks via Loss-Restricted Fine-Tuning", "abstract": "Adversarial robustness has attracted much attention recently, and the mainstream solution is adversarial training. However, the tradition of generating adversarial perturbations for each input embedding (in the settings of NLP) scales up the training computational complexity by the number of gradient steps it takes to obtain the adversarial samples. To address this problem, we leverage Flooding method which primarily aims at better generalization and we find promising in defending adversarial attacks. We further propose an effective criterion to bring hyper-parameter-dependent flooding into effect with a narrowed-down search space by measuring how the gradient steps taken within one epoch affect the loss of each batch. Our approach requires zero adversarial sample for training, and its time consumption is equivalent to fine-tuning, which can be 2-15 times faster than standard adversarial training. 
We experimentally show that our method improves BERT\u2019s resistance to textual adversarial attacks by a large margin, and achieves state-of-the-art robust accuracy on various text classification and GLUE tasks.", "doc_id": "5a412e4496b34ab9694e7b9a3e3274e3", "publication_year": 2022, "sentences": ["adversarial robustness has attracted much attention recently , and the mainstream solution is adversarial training .", "however , the tradition of generating adversarial perturbations for each input embedding ( in the settings of nlp ) scales up the training computational complexity by the number of gradient steps it takes to obtain the adversarial samples .", "to address this problem , we leverage flooding method which primarily aims at better generalization and we find promising in defending adversarial attacks .", "we further propose an effective criterion to bring hyper - parameter - dependent flooding into effect with a narrowed - down search space by measuring how the gradient steps taken within one epoch affect the loss of each batch .", "our approach requires zero adversarial sample for training , and its time consumption is equivalent to fine - tuning , which can be 2 - 15 times faster than standard adversarial training .", "we experimentally show that our method improves bert \u2019 s resistance to textual adversarial attacks by a large margin , and achieves state - of - the - art robust accuracy on various text classification and glue tasks ."], "events": [{"event_type": "ITT", "arguments": [{"text": "adversarial robustness", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["adversarial", "robustness"], "offsets": [0, 1]}], "trigger": {"text": "attracted", "tokens": ["attracted"], "offsets": [3]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [60]}, {"text": "flooding method", "nugget_type": "APP", "argument_type": "Content", "tokens": ["flooding", "method"], 
"offsets": [62, 63]}, {"text": "better generalization", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["better", "generalization"], "offsets": [68, 69]}], "trigger": {"text": "leverage", "tokens": ["leverage"], "offsets": [61]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [71]}, {"text": "promising", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["promising"], "offsets": [73]}, {"text": "in defending adversarial attacks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "defending", "adversarial", "attacks"], "offsets": [74, 75, 76, 77]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [72]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [79]}, {"text": "effective criterion", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effective", "criterion"], "offsets": [83, 84]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [81]}}, {"event_type": "WKS", "arguments": [{"text": "gradient steps", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["gradient", "steps"], "offsets": [106, 107]}, {"text": "taken within one epoch", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["taken", "within", "one", "epoch"], "offsets": [108, 109, 110, 111]}, {"text": "bring", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["bring"], "offsets": [86]}, {"text": "effect", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["effect"], "offsets": [94]}, {"text": "with a narrowed - down search space", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["with", "a", "narrowed", "-", "down", "search", "space"], "offsets": [95, 96, 97, 98, 99, 100, 101]}], "trigger": {"text": "measuring", "tokens": ["measuring"], "offsets": [103]}}, {"event_type": "PUR", "arguments": [{"text": "hyper 
- parameter - dependent flooding", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["hyper", "-", "parameter", "-", "dependent", "flooding"], "offsets": [87, 88, 89, 90, 91, 92]}], "trigger": {"text": "bring", "tokens": ["bring"], "offsets": [86]}}, {"event_type": "WKS", "arguments": [{"text": "zero adversarial sample", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["zero", "adversarial", "sample"], "offsets": [122, 123, 124]}, {"text": "training", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["training"], "offsets": [126]}], "trigger": {"text": "requires", "tokens": ["requires"], "offsets": [121]}}, {"event_type": "CMP", "arguments": [{"text": "2 - 15", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["2", "-", "15"], "offsets": [142, 143, 144]}, {"text": "times", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["times"], "offsets": [145]}, {"text": "faster", "nugget_type": "STR", "argument_type": "Result", "tokens": ["faster"], "offsets": [146]}], "trigger": {"text": "faster", "tokens": ["faster"], "offsets": [146]}}, {"event_type": "FIN", "arguments": [{"text": "improves", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["improves"], "offsets": [158]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [154]}}, {"event_type": "CMP", "arguments": [{"text": "flooding method", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["flooding", "method"], "offsets": [62, 63]}, {"text": "improves", "nugget_type": "STR", "argument_type": "Result", "tokens": ["improves"], "offsets": [158]}, {"text": "bert \u2019 s resistance", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["bert", "\u2019", "s", "resistance"], "offsets": [159, 160, 161, 162]}, {"text": "to textual adversarial attacks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["to", "textual", "adversarial", "attacks"], "offsets": [163, 164, 165, 166]}], "trigger": {"text": "improves", "tokens": 
["improves"], "offsets": [158]}}, {"event_type": "FIN", "arguments": [{"text": "achieves", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["achieves"], "offsets": [173]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [154]}}, {"event_type": "FAC", "arguments": [{"text": "flooding method", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["flooding", "method"], "offsets": [62, 63]}, {"text": "state - of - the - art robust accuracy", "nugget_type": "STR", "argument_type": "Object", "tokens": ["state", "-", "of", "-", "the", "-", "art", "robust", "accuracy"], "offsets": [174, 175, 176, 177, 178, 179, 180, 181, 182]}, {"text": "various text classification", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["various", "text", "classification"], "offsets": [184, 185, 186]}, {"text": "glue tasks", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["glue", "tasks"], "offsets": [188, 189]}], "trigger": {"text": "achieves", "tokens": ["achieves"], "offsets": [173]}}], "document": ["adversarial", "robustness", "has", "attracted", "much", "attention", "recently", ",", "and", "the", "mainstream", "solution", "is", "adversarial", "training", ".", "however", ",", "the", "tradition", "of", "generating", "adversarial", "perturbations", "for", "each", "input", "embedding", "(", "in", "the", "settings", "of", "nlp", ")", "scales", "up", "the", "training", "computational", "complexity", "by", "the", "number", "of", "gradient", "steps", "it", "takes", "to", "obtain", "the", "adversarial", "samples", ".", "to", "address", "this", "problem", ",", "we", "leverage", "flooding", "method", "which", "primarily", "aims", "at", "better", "generalization", "and", "we", "find", "promising", "in", "defending", "adversarial", "attacks", ".", "we", "further", "propose", "an", "effective", "criterion", "to", "bring", "hyper", "-", "parameter", "-", "dependent", "flooding", "into", "effect", "with", "a", "narrowed", "-", "down", "search", 
"space", "by", "measuring", "how", "the", "gradient", "steps", "taken", "within", "one", "epoch", "affect", "the", "loss", "of", "each", "batch", ".", "our", "approach", "requires", "zero", "adversarial", "sample", "for", "training", ",", "and", "its", "time", "consumption", "is", "equivalent", "to", "fine", "-", "tuning", ",", "which", "can", "be", "2", "-", "15", "times", "faster", "than", "standard", "adversarial", "training", ".", "we", "experimentally", "show", "that", "our", "method", "improves", "bert", "\u2019", "s", "resistance", "to", "textual", "adversarial", "attacks", "by", "a", "large", "margin", ",", "and", "achieves", "state", "-", "of", "-", "the", "-", "art", "robust", "accuracy", "on", "various", "text", "classification", "and", "glue", "tasks", "."]}, {"venue": "ACL", "title": "ChID: A Large-scale Chinese IDiom Dataset for Cloze Test", "abstract": "Cloze-style reading comprehension in Chinese is still limited due to the lack of various corpora. In this paper we propose a large-scale Chinese cloze test dataset ChID, which studies the comprehension of idiom, a unique language phenomenon in Chinese. In this corpus, the idioms in a passage are replaced by blank symbols and the correct answer needs to be chosen from well-designed candidate idioms. We carefully study how the design of candidate idioms and the representation of idioms affect the performance of state-of-the-art models. 
Results show that the machine accuracy is substantially worse than that of human, indicating a large space for further research.", "doc_id": "cb78ad97d226411c45ad4b952a84c85f", "publication_year": 2019, "sentences": ["cloze - style reading comprehension in chinese is still limited due to the lack of various corpora .", "in this paper we propose a large - scale chinese cloze test dataset chid , which studies the comprehension of idiom , a unique language phenomenon in chinese .", "in this corpus , the idioms in a passage are replaced by blank symbols and the correct answer needs to be chosen from well - designed candidate idioms .", "we carefully study how the design of candidate idioms and the representation of idioms affect the performance of state - of - the - art models .", "results show that the machine accuracy is substantially worse than that of human , indicating a large space for further research ."], "events": [{"event_type": "RWF", "arguments": [{"text": "cloze - style reading comprehension in chinese", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["cloze", "-", "style", "reading", "comprehension", "in", "chinese"], "offsets": [0, 1, 2, 3, 4, 5, 6]}], "trigger": {"text": "limited", "tokens": ["limited"], "offsets": [9]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [21]}, {"text": "large - scale chinese cloze test dataset", "nugget_type": "DST", "argument_type": "Content", "tokens": ["large", "-", "scale", "chinese", "cloze", "test", "dataset"], "offsets": [24, 25, 26, 27, 28, 29, 30]}, {"text": "studies", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["studies"], "offsets": [34]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [22]}}, {"event_type": "PUR", "arguments": [{"text": "comprehension of idiom", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["comprehension", "of", "idiom"], "offsets": [36, 37, 38]}], 
"trigger": {"text": "studies", "tokens": ["studies"], "offsets": [34]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [76]}, {"text": "design of candidate idioms", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["design", "of", "candidate", "idioms"], "offsets": [81, 82, 83, 84]}, {"text": "representation of idioms", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["representation", "of", "idioms"], "offsets": [87, 88, 89]}], "trigger": {"text": "study", "tokens": ["study"], "offsets": [78]}}, {"event_type": "FIN", "arguments": [{"text": "substantially worse", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["substantially", "worse"], "offsets": [110, 111]}, {"text": "indicating", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["indicating"], "offsets": [117]}], "trigger": {"text": "show", "tokens": ["show"], "offsets": [104]}}, {"event_type": "CMP", "arguments": [{"text": "substantially worse", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["substantially", "worse"], "offsets": [110, 111]}, {"text": "machine accuracy", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["machine", "accuracy"], "offsets": [107, 108]}], "trigger": {"text": "substantially worse", "tokens": ["substantially", "worse"], "offsets": [110, 111]}}, {"event_type": "FAC", "arguments": [{"text": "large space for further research", "nugget_type": "TAK", "argument_type": "Subject", "tokens": ["large", "space", "for", "further", "research"], "offsets": [119, 120, 121, 122, 123]}], "trigger": {"text": "indicating", "tokens": ["indicating"], "offsets": [117]}}, {"event_type": "RWF", "arguments": [{"text": "cloze - style reading comprehension in chinese", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["cloze", "-", "style", "reading", "comprehension", "in", "chinese"], "offsets": [0, 1, 2, 3, 4, 5, 6]}, {"text": "lack", 
"nugget_type": "WEA", "argument_type": "Fault", "tokens": ["lack"], "offsets": [13]}], "trigger": {"text": "lack", "tokens": ["lack"], "offsets": [13]}}, {"event_type": "MDS", "arguments": [{"text": "idioms", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["idioms"], "offsets": [52]}, {"text": "blank symbols", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["blank", "symbols"], "offsets": [59, 60]}], "trigger": {"text": "replaced", "tokens": ["replaced"], "offsets": [57]}}, {"event_type": "MDS", "arguments": [{"text": "correct answer", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["correct", "answer"], "offsets": [63, 64]}, {"text": "well - designed candidate idioms", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["well", "-", "designed", "candidate", "idioms"], "offsets": [70, 71, 72, 73, 74]}], "trigger": {"text": "chosen", "tokens": ["chosen"], "offsets": [68]}}], "document": ["cloze", "-", "style", "reading", "comprehension", "in", "chinese", "is", "still", "limited", "due", "to", "the", "lack", "of", "various", "corpora", ".", "in", "this", "paper", "we", "propose", "a", "large", "-", "scale", "chinese", "cloze", "test", "dataset", "chid", ",", "which", "studies", "the", "comprehension", "of", "idiom", ",", "a", "unique", "language", "phenomenon", "in", "chinese", ".", "in", "this", "corpus", ",", "the", "idioms", "in", "a", "passage", "are", "replaced", "by", "blank", "symbols", "and", "the", "correct", "answer", "needs", "to", "be", "chosen", "from", "well", "-", "designed", "candidate", "idioms", ".", "we", "carefully", "study", "how", "the", "design", "of", "candidate", "idioms", "and", "the", "representation", "of", "idioms", "affect", "the", "performance", "of", "state", "-", "of", "-", "the", "-", "art", "models", ".", "results", "show", "that", "the", "machine", "accuracy", "is", "substantially", "worse", "than", "that", "of", "human", ",", "indicating", "a", "large", 
"space", "for", "further", "research", "."]}, {"venue": "ACL", "title": "ReCLIP: A Strong Zero-Shot Baseline for Referring Expression Comprehension", "abstract": "Training a referring expression comprehension (ReC) model for a new visual domain requires collecting referring expressions, and potentially corresponding bounding boxes, for images in the domain. While large-scale pre-trained models are useful for image classification across domains, it remains unclear if they can be applied in a zero-shot manner to more complex tasks like ReC. We present ReCLIP, a simple but strong zero-shot baseline that repurposes CLIP, a state-of-the-art large-scale model, for ReC. Motivated by the close connection between ReC and CLIP\u2019s contrastive pre-training objective, the first component of ReCLIP is a region-scoring method that isolates object proposals via cropping and blurring, and passes them to CLIP. However, through controlled experiments on a synthetic dataset, we find that CLIP is largely incapable of performing spatial reasoning off-the-shelf. We reduce the gap between zero-shot baselines from prior work and supervised models by as much as 29% on RefCOCOg, and on RefGTA (video game imagery), ReCLIP\u2019s relative improvement over supervised ReC models trained on real images is 8%.", "doc_id": "8f0599a807b17e41dde70e2a992d8a61", "publication_year": 2022, "sentences": ["training a referring expression comprehension ( rec ) model for a new visual domain requires collecting referring expressions , and potentially corresponding bounding boxes , for images in the domain .", "while large - scale pre - trained models are useful for image classification across domains , it remains unclear if they can be applied in a zero - shot manner to more complex tasks like rec .", "we present reclip , a simple but strong zero - shot baseline that repurposes clip , a state - of - the - art large - scale model , for rec . 
motivated by the close connection between rec and clip \u2019 s contrastive pre - training objective , the first component of reclip is a region - scoring method that isolates object proposals via cropping and blurring , and passes them to clip .", "however , through controlled experiments on a synthetic dataset , we find that clip is largely incapable of performing spatial reasoning off - the - shelf .", "we reduce the gap between zero - shot baselines from prior work and supervised models by as much as 29 % on refcocog , and on refgta ( video game imagery ) , reclip \u2019 s relative improvement over supervised rec models trained on real images is 8 % ."], "events": [{"event_type": "ITT", "arguments": [{"text": "referring expression comprehension ( rec ) model", "nugget_type": "APP", "argument_type": "Target", "tokens": ["referring", "expression", "comprehension", "(", "rec", ")", "model"], "offsets": [2, 3, 4, 5, 6, 7, 8]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [0]}}, {"event_type": "RWF", "arguments": [{"text": "large - scale pre - trained models", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["large", "-", "scale", "pre", "-", "trained", "models"], "offsets": [32, 33, 34, 35, 36, 37, 38]}, {"text": "unclear", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["unclear"], "offsets": [49]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [48]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [68]}, {"text": "zero - shot baseline", "nugget_type": "APP", "argument_type": "Content", "tokens": ["zero", "-", "shot", "baseline"], "offsets": [76, 77, 78, 79]}, {"text": "repurposes", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["repurposes"], "offsets": [81]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [69]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": 
"OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [153]}, {"text": "largely incapable", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["largely", "incapable"], "offsets": [158, 159]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [154]}}, {"event_type": "FAC", "arguments": [{"text": "clip", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["clip"], "offsets": [156]}, {"text": "performing", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["performing"], "offsets": [161]}, {"text": "synthetic dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["synthetic", "dataset"], "offsets": [150, 151]}], "trigger": {"text": "largely incapable", "tokens": ["largely", "incapable"], "offsets": [158, 159]}}, {"event_type": "PUR", "arguments": [{"text": "clip", "nugget_type": "APP", "argument_type": "Aim", "tokens": ["clip"], "offsets": [82]}, {"text": "for rec", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["for", "rec"], "offsets": [97, 98]}], "trigger": {"text": "repurposes", "tokens": ["repurposes"], "offsets": [81]}}, {"event_type": "MDS", "arguments": [{"text": "region - scoring method", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["region", "-", "scoring", "method"], "offsets": [124, 125, 126, 127]}, {"text": "isolates", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["isolates"], "offsets": [129]}, {"text": "cropping", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["cropping"], "offsets": [133]}, {"text": "blurring", "nugget_type": "APP", "argument_type": "TriedComponent", "tokens": ["blurring"], "offsets": [135]}], "trigger": {"text": "via", "tokens": ["via"], "offsets": [132]}}, {"event_type": "PUR", "arguments": [{"text": "object proposals", "nugget_type": "FEA", "argument_type": "Aim", "tokens": ["object", "proposals"], "offsets": [130, 131]}], "trigger": {"text": "isolates", "tokens": ["isolates"], "offsets": 
[129]}}, {"event_type": "MDS", "arguments": [{"text": "clip", "nugget_type": "APP", "argument_type": "BaseComponent", "tokens": ["clip"], "offsets": [141]}, {"text": "object proposals", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["object", "proposals"], "offsets": [130, 131]}], "trigger": {"text": "passes", "tokens": ["passes"], "offsets": [138]}}, {"event_type": "PUR", "arguments": [{"text": "spatial reasoning off - the - shelf", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["spatial", "reasoning", "off", "-", "the", "-", "shelf"], "offsets": [162, 163, 164, 165, 166, 167, 168]}], "trigger": {"text": "performing", "tokens": ["performing"], "offsets": [161]}}, {"event_type": "FAC", "arguments": [{"text": "gap between zero - shot baselines from prior work and supervised models", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["gap", "between", "zero", "-", "shot", "baselines", "from", "prior", "work", "and", "supervised", "models"], "offsets": [173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184]}, {"text": "29 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["29", "%"], "offsets": [189, 190]}, {"text": "on refcocog", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "refcocog"], "offsets": [191, 192]}], "trigger": {"text": "reduce", "tokens": ["reduce"], "offsets": [171]}}, {"event_type": "CMP", "arguments": [{"text": "on refgta", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "refgta"], "offsets": [195, 196]}, {"text": "supervised rec models", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["supervised", "rec", "models"], "offsets": [209, 210, 211]}, {"text": "8 %", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["8", "%"], "offsets": [217, 218]}, {"text": "relative improvement", "nugget_type": "STR", "argument_type": "Result", "tokens": ["relative", "improvement"], "offsets": [206, 207]}, {"text": "reclip", "nugget_type": "APP", 
"argument_type": "Arg1", "tokens": ["reclip"], "offsets": [203]}], "trigger": {"text": "over", "tokens": ["over"], "offsets": [208]}}], "document": ["training", "a", "referring", "expression", "comprehension", "(", "rec", ")", "model", "for", "a", "new", "visual", "domain", "requires", "collecting", "referring", "expressions", ",", "and", "potentially", "corresponding", "bounding", "boxes", ",", "for", "images", "in", "the", "domain", ".", "while", "large", "-", "scale", "pre", "-", "trained", "models", "are", "useful", "for", "image", "classification", "across", "domains", ",", "it", "remains", "unclear", "if", "they", "can", "be", "applied", "in", "a", "zero", "-", "shot", "manner", "to", "more", "complex", "tasks", "like", "rec", ".", "we", "present", "reclip", ",", "a", "simple", "but", "strong", "zero", "-", "shot", "baseline", "that", "repurposes", "clip", ",", "a", "state", "-", "of", "-", "the", "-", "art", "large", "-", "scale", "model", ",", "for", "rec", ".", "motivated", "by", "the", "close", "connection", "between", "rec", "and", "clip", "\u2019", "s", "contrastive", "pre", "-", "training", "objective", ",", "the", "first", "component", "of", "reclip", "is", "a", "region", "-", "scoring", "method", "that", "isolates", "object", "proposals", "via", "cropping", "and", "blurring", ",", "and", "passes", "them", "to", "clip", ".", "however", ",", "through", "controlled", "experiments", "on", "a", "synthetic", "dataset", ",", "we", "find", "that", "clip", "is", "largely", "incapable", "of", "performing", "spatial", "reasoning", "off", "-", "the", "-", "shelf", ".", "we", "reduce", "the", "gap", "between", "zero", "-", "shot", "baselines", "from", "prior", "work", "and", "supervised", "models", "by", "as", "much", "as", "29", "%", "on", "refcocog", ",", "and", "on", "refgta", "(", "video", "game", "imagery", ")", ",", "reclip", "\u2019", "s", "relative", "improvement", "over", "supervised", "rec", "models", "trained", "on", "real", "images", "is", "8", "%", 
"."]}, {"venue": "ACL", "title": "Situated Dialogue Learning through Procedural Environment Generation", "abstract": "We teach goal-driven agents to interactively act and speak in situated environments by training on generated curriculums. Our agents operate in LIGHT (Urbanek et al. 2019)\u2014a large-scale crowd-sourced fantasy text adventure game wherein an agent perceives and interacts with the world through textual natural language. Goals in this environment take the form of character-based quests, consisting of personas and motivations. We augment LIGHT by learning to procedurally generate additional novel textual worlds and quests to create a curriculum of steadily increasing difficulty for training agents to achieve such goals. In particular, we measure curriculum difficulty in terms of the rarity of the quest in the original training distribution\u2014an easier environment is one that is more likely to have been found in the unaugmented dataset. An ablation study shows that this method of learning from the tail of a distribution results in significantly higher generalization abilities as measured by zero-shot performance on never-before-seen quests.", "doc_id": "52ef82bd748027fbbc1b67def523c6be", "publication_year": 2022, "sentences": ["we teach goal - driven agents to interactively act and speak in situated environments by training on generated curriculums .", "our agents operate in light ( urbanek et al . 
2019 ) \u2014 a large - scale crowd - sourced fantasy text adventure game wherein an agent perceives and interacts with the world through textual natural language .", "goals in this environment take the form of character - based quests , consisting of personas and motivations .", "we augment light by learning to procedurally generate additional novel textual worlds and quests to create a curriculum of steadily increasing difficulty for training agents to achieve such goals .", "in particular , we measure curriculum difficulty in terms of the rarity of the quest in the original training distribution \u2014 an easier environment is one that is more likely to have been found in the unaugmented dataset .", "an ablation study shows that this method of learning from the tail of a distribution results in significantly higher generalization abilities as measured by zero - shot performance on never - before - seen quests ."], "events": [{"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [0]}, {"text": "goal - driven agents", "nugget_type": "MOD", "argument_type": "Content", "tokens": ["goal", "-", "driven", "agents"], "offsets": [2, 3, 4, 5]}, {"text": "on generated curriculums", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "generated", "curriculums"], "offsets": [16, 17, 18]}, {"text": "interactively act and speak", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["interactively", "act", "and", "speak"], "offsets": [7, 8, 9, 10]}], "trigger": {"text": "training", "tokens": ["training"], "offsets": [15]}}, {"event_type": "PUR", "arguments": [{"text": "in situated environments", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "situated", "environments"], "offsets": [11, 12, 13]}], "trigger": {"text": "interactively act and speak", "tokens": ["interactively", "act", "and", "speak"], "offsets": [7, 8, 9, 10]}}, {"event_type": "WKS", 
"arguments": [{"text": "in light", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "light"], "offsets": [23, 24]}], "trigger": {"text": "operate", "tokens": ["operate"], "offsets": [22]}}, {"event_type": "WKS", "arguments": [{"text": "through textual natural language", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["through", "textual", "natural", "language"], "offsets": [53, 54, 55, 56]}, {"text": "world", "nugget_type": "FEA", "argument_type": "Content", "tokens": ["world"], "offsets": [52]}], "trigger": {"text": "perceives and interacts", "tokens": ["perceives", "and", "interacts"], "offsets": [47, 48, 49]}}, {"event_type": "MDS", "arguments": [{"text": "character - based quests", "nugget_type": "FEA", "argument_type": "BaseComponent", "tokens": ["character", "-", "based", "quests"], "offsets": [66, 67, 68, 69]}, {"text": "personas", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["personas"], "offsets": [73]}, {"text": "motivations", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["motivations"], "offsets": [75]}], "trigger": {"text": "consisting", "tokens": ["consisting"], "offsets": [71]}}, {"event_type": "MDS", "arguments": [{"text": "additional novel textual worlds", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["additional", "novel", "textual", "worlds"], "offsets": [85, 86, 87, 88]}], "trigger": {"text": "generate", "tokens": ["generate"], "offsets": [84]}}, {"event_type": "MDS", "arguments": [{"text": "curriculum of steadily increasing difficulty", "nugget_type": "FEA", "argument_type": "TriedComponent", "tokens": ["curriculum", "of", "steadily", "increasing", "difficulty"], "offsets": [94, 95, 96, 97, 98]}, {"text": "training agents", "nugget_type": "MOD", "argument_type": "Target", "tokens": ["training", "agents"], "offsets": [100, 101]}, {"text": "achieve", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["achieve"], "offsets": [103]}], 
"trigger": {"text": "create", "tokens": ["create"], "offsets": [92]}}, {"event_type": "PUR", "arguments": [{"text": "such goals", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["character", "-", "based", "quests"], "offsets": [66, 67, 68, 69]}], "trigger": {"text": "achieve", "tokens": ["achieve"], "offsets": [103]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [110]}, {"text": "curriculum difficulty", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["curriculum", "difficulty"], "offsets": [112, 113]}, {"text": "in terms of the rarity of the quest in the original training distribution", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "terms", "of", "the", "rarity", "of", "the", "quest", "in", "the", "original", "training", "distribution"], "offsets": [114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126]}], "trigger": {"text": "measure", "tokens": ["measure"], "offsets": [111]}}, {"event_type": "FIN", "arguments": [{"text": "results", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": ["results"], "offsets": [161]}], "trigger": {"text": "shows", "tokens": ["shows"], "offsets": [149]}}, {"event_type": "CMP", "arguments": [{"text": "method of learning from the tail of a distribution", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["method", "of", "learning", "from", "the", "tail", "of", "a", "distribution"], "offsets": [152, 153, 154, 155, 156, 157, 158, 159, 160]}, {"text": "significantly", "nugget_type": "DEG", "argument_type": "Extent", "tokens": ["significantly"], "offsets": [163]}, {"text": "higher", "nugget_type": "STR", "argument_type": "Result", "tokens": ["higher"], "offsets": [164]}, {"text": "generalization abilities", "nugget_type": "TAK", "argument_type": "Metrics", "tokens": ["generalization", "abilities"], "offsets": [165, 166]}, {"text": "measured by zero - shot performance on never - 
before - seen quests", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["measured", "by", "zero", "-", "shot", "performance", "on", "never", "-", "before", "-", "seen", "quests"], "offsets": [168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180]}], "trigger": {"text": "results", "tokens": ["results"], "offsets": [161]}}], "document": ["we", "teach", "goal", "-", "driven", "agents", "to", "interactively", "act", "and", "speak", "in", "situated", "environments", "by", "training", "on", "generated", "curriculums", ".", "our", "agents", "operate", "in", "light", "(", "urbanek", "et", "al", ".", "2019", ")", "\u2014", "a", "large", "-", "scale", "crowd", "-", "sourced", "fantasy", "text", "adventure", "game", "wherein", "an", "agent", "perceives", "and", "interacts", "with", "the", "world", "through", "textual", "natural", "language", ".", "goals", "in", "this", "environment", "take", "the", "form", "of", "character", "-", "based", "quests", ",", "consisting", "of", "personas", "and", "motivations", ".", "we", "augment", "light", "by", "learning", "to", "procedurally", "generate", "additional", "novel", "textual", "worlds", "and", "quests", "to", "create", "a", "curriculum", "of", "steadily", "increasing", "difficulty", "for", "training", "agents", "to", "achieve", "such", "goals", ".", "in", "particular", ",", "we", "measure", "curriculum", "difficulty", "in", "terms", "of", "the", "rarity", "of", "the", "quest", "in", "the", "original", "training", "distribution", "\u2014", "an", "easier", "environment", "is", "one", "that", "is", "more", "likely", "to", "have", "been", "found", "in", "the", "unaugmented", "dataset", ".", "an", "ablation", "study", "shows", "that", "this", "method", "of", "learning", "from", "the", "tail", "of", "a", "distribution", "results", "in", "significantly", "higher", "generalization", "abilities", "as", "measured", "by", "zero", "-", "shot", "performance", "on", "never", "-", "before", "-", "seen", "quests", "."]}, 
{"venue": "ACL", "title": "Understanding Attention for Text Classification", "abstract": "Attention has been proven successful in many natural language processing (NLP) tasks. Recently, many researchers started to investigate the interpretability of attention on NLP tasks. Many existing approaches focused on examining whether the local attention weights could reflect the importance of input representations. In this work, we present a study on understanding the internal mechanism of attention by looking into the gradient update process, checking its behavior when approaching a local minimum during training. We propose to analyze for each word token the following two quantities: its polarity score and its attention score, where the latter is a global assessment on the token\u2019s significance. We discuss conditions under which the attention mechanism may become more (or less) interpretable, and show how the interplay between the two quantities can contribute towards model performance.", "doc_id": "4420a962de5b28bec713ccc77de0fd9d", "publication_year": 2020, "sentences": ["attention has been proven successful in many natural language processing ( nlp ) tasks .", "recently , many researchers started to investigate the interpretability of attention on nlp tasks .", "many existing approaches focused on examining whether the local attention weights could reflect the importance of input representations .", "in this work , we present a study on understanding the internal mechanism of attention by looking into the gradient update process , checking its behavior when approaching a local minimum during training .", "we propose to analyze for each word token the following two quantities : its polarity score and its attention score , where the latter is a global assessment on the token \u2019 s significance .", "we discuss conditions under which the attention mechanism may become more ( or less ) interpretable , and show how the interplay between the two quantities can 
contribute towards model performance ."], "events": [{"event_type": "ITT", "arguments": [{"text": "interpretability of attention", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["interpretability", "of", "attention"], "offsets": [23, 24, 25]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [21]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [53]}, {"text": "study", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["study"], "offsets": [56]}], "trigger": {"text": "present", "tokens": ["present"], "offsets": [54]}}, {"event_type": "WKS", "arguments": [{"text": "gradient update process", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["gradient", "update", "process"], "offsets": [68, 69, 70]}, {"text": "understanding", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["understanding"], "offsets": [58]}], "trigger": {"text": "looking", "tokens": ["looking"], "offsets": [65]}}, {"event_type": "PUR", "arguments": [{"text": "internal mechanism of attention", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["internal", "mechanism", "of", "attention"], "offsets": [60, 61, 62, 63]}], "trigger": {"text": "understanding", "tokens": ["understanding"], "offsets": [58]}}, {"event_type": "WKS", "arguments": [{"text": "its behavior", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["internal", "mechanism", "of", "attention", "behavior"], "offsets": [60, 61, 62, 63, 74]}, {"text": "when approaching a local minimum during training", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["when", "approaching", "a", "local", "minimum", "during", "training"], "offsets": [75, 76, 77, 78, 79, 80, 81]}], "trigger": {"text": "checking", "tokens": ["checking"], "offsets": [72]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], 
"offsets": [83]}, {"text": "each word token", "nugget_type": "FEA", "argument_type": "Target", "tokens": ["each", "word", "token"], "offsets": [88, 89, 90]}, {"text": "polarity score", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["polarity", "score"], "offsets": [97, 98]}, {"text": "attention score", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["attention", "score"], "offsets": [101, 102]}], "trigger": {"text": "analyze", "tokens": ["analyze"], "offsets": [86]}}], "document": ["attention", "has", "been", "proven", "successful", "in", "many", "natural", "language", "processing", "(", "nlp", ")", "tasks", ".", "recently", ",", "many", "researchers", "started", "to", "investigate", "the", "interpretability", "of", "attention", "on", "nlp", "tasks", ".", "many", "existing", "approaches", "focused", "on", "examining", "whether", "the", "local", "attention", "weights", "could", "reflect", "the", "importance", "of", "input", "representations", ".", "in", "this", "work", ",", "we", "present", "a", "study", "on", "understanding", "the", "internal", "mechanism", "of", "attention", "by", "looking", "into", "the", "gradient", "update", "process", ",", "checking", "its", "behavior", "when", "approaching", "a", "local", "minimum", "during", "training", ".", "we", "propose", "to", "analyze", "for", "each", "word", "token", "the", "following", "two", "quantities", ":", "its", "polarity", "score", "and", "its", "attention", "score", ",", "where", "the", "latter", "is", "a", "global", "assessment", "on", "the", "token", "\u2019", "s", "significance", ".", "we", "discuss", "conditions", "under", "which", "the", "attention", "mechanism", "may", "become", "more", "(", "or", "less", ")", "interpretable", ",", "and", "show", "how", "the", "interplay", "between", "the", "two", "quantities", "can", "contribute", "towards", "model", "performance", "."]}, {"venue": "ACL", "title": "An Empirical Study on Hyperparameter Optimization for Fine-Tuning Pre-trained 
Language Models", "abstract": "The performance of fine-tuning pre-trained language models largely depends on the hyperparameter configuration. In this paper, we investigate the performance of modern hyperparameter optimization methods (HPO) on fine-tuning pre-trained language models. First, we study and report three HPO algorithms\u2019 performances on fine-tuning two state-of-the-art language models on the GLUE dataset. We find that using the same time budget, HPO often fails to outperform grid search due to two reasons: insufficient time budget and overfitting. We propose two general strategies and an experimental procedure to systematically troubleshoot HPO\u2019s failure cases. By applying the procedure, we observe that HPO can succeed with more appropriate settings in the search space and time budget; however, in certain cases overfitting remains. Finally, we make suggestions for future work. Our implementation can be found in https://github.com/microsoft/FLAML/tree/main/flaml/nlp/", "doc_id": "2b7c6fae293408579c51054d15c3f123", "publication_year": 2021, "sentences": ["the performance of fine - tuning pre - trained language models largely depends on the hyperparameter configuration .", "in this paper , we investigate the performance of modern hyperparameter optimization methods ( hpo ) on fine - tuning pre - trained language models .", "first , we study and report three hpo algorithms \u2019 performances on fine - tuning two state - of - the - art language models on the glue dataset .", "we find that using the same time budget , hpo often fails to outperform grid search due to two reasons : insufficient time budget and overfitting .", "we propose two general strategies and an experimental procedure to systematically troubleshoot hpo \u2019 s failure cases .", "by applying the procedure , we observe that hpo can succeed with more appropriate settings in the search space and time budget ; however , in certain cases overfitting remains .", "finally , we make 
suggestions for future work .", "our implementation can be found in https : / / github . com / microsoft / flaml / tree / main / flaml / nlp /"], "events": [{"event_type": "ITT", "arguments": [{"text": "hyperparameter configuration", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["hyperparameter", "configuration"], "offsets": [15, 16]}], "trigger": {"text": "depends", "tokens": ["depends"], "offsets": [12]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [22]}, {"text": "performance of modern hyperparameter optimization methods", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["performance", "of", "modern", "hyperparameter", "optimization", "methods"], "offsets": [25, 26, 27, 28, 29, 30]}], "trigger": {"text": "investigate", "tokens": ["investigate"], "offsets": [23]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [46]}, {"text": "three hpo algorithms \u2019 performances", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["three", "hpo", "algorithms", "\u2019", "performances"], "offsets": [50, 51, 52, 53, 54]}, {"text": "on fine - tuning two state - of - the - art language models", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "fine", "-", "tuning", "two", "state", "-", "of", "-", "the", "-", "art", "language", "models"], "offsets": [55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68]}, {"text": "glue dataset", "nugget_type": "DST", "argument_type": "Dataset", "tokens": ["glue", "dataset"], "offsets": [71, 72]}], "trigger": {"text": "study and report", "tokens": ["study", "and", "report"], "offsets": [47, 48, 49]}}, {"event_type": "FIN", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [74]}, {"text": "outperform", "nugget_type": "E-CMP", "argument_type": "Content", "tokens": 
["outperform"], "offsets": [87]}], "trigger": {"text": "find", "tokens": ["find"], "offsets": [75]}}, {"event_type": "CMP", "arguments": [{"text": "hyperparameter optimization methods", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["hyperparameter", "optimization", "methods"], "offsets": [28, 29, 30]}, {"text": "fails", "nugget_type": "WEA", "argument_type": "Result", "tokens": ["fails"], "offsets": [85]}, {"text": "grid search", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["grid", "search"], "offsets": [88, 89]}, {"text": "using the same time budget", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["using", "the", "same", "time", "budget"], "offsets": [77, 78, 79, 80, 81]}], "trigger": {"text": "outperform", "tokens": ["outperform"], "offsets": [87]}}, {"event_type": "PRP", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Proposer", "tokens": ["we"], "offsets": [101]}, {"text": "two general strategies", "nugget_type": "APP", "argument_type": "Content", "tokens": ["two", "general", "strategies"], "offsets": [103, 104, 105]}, {"text": "experimental procedure", "nugget_type": "APP", "argument_type": "Content", "tokens": ["experimental", "procedure"], "offsets": [108, 109]}, {"text": "troubleshoot", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["troubleshoot"], "offsets": [112]}], "trigger": {"text": "propose", "tokens": ["propose"], "offsets": [102]}}, {"event_type": "PUR", "arguments": [{"text": "hpo \u2019 s failure cases", "nugget_type": "WEA", "argument_type": "Aim", "tokens": ["hpo", "\u2019", "s", "failure", "cases"], "offsets": [113, 114, 115, 116, 117]}], "trigger": {"text": "troubleshoot", "tokens": ["troubleshoot"], "offsets": [112]}}, {"event_type": "FIN", "arguments": [{"text": "remains", "nugget_type": "E-FAC", "argument_type": "Content", "tokens": ["remains"], "offsets": [148]}, {"text": "we", "nugget_type": "OG", "argument_type": "Finder", "tokens": ["we"], "offsets": [124]}], 
"trigger": {"text": "observe", "tokens": ["observe"], "offsets": [125]}}, {"event_type": "FAC", "arguments": [{"text": "hyperparameter optimization methods", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["hyperparameter", "optimization", "methods"], "offsets": [28, 29, 30]}, {"text": "more appropriate settings", "nugget_type": "STR", "argument_type": "Object", "tokens": ["more", "appropriate", "settings"], "offsets": [131, 132, 133]}, {"text": "in the search space and time budget", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "the", "search", "space", "and", "time", "budget"], "offsets": [134, 135, 136, 137, 138, 139, 140]}], "trigger": {"text": "succeed", "tokens": ["succeed"], "offsets": [129]}}, {"event_type": "FAC", "arguments": [{"text": "in certain cases", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["in", "certain", "cases"], "offsets": [144, 145, 146]}, {"text": "overfitting", "nugget_type": "WEA", "argument_type": "Subject", "tokens": ["overfitting"], "offsets": [147]}], "trigger": {"text": "remains", "tokens": ["remains"], "offsets": [148]}}], "document": ["the", "performance", "of", "fine", "-", "tuning", "pre", "-", "trained", "language", "models", "largely", "depends", "on", "the", "hyperparameter", "configuration", ".", "in", "this", "paper", ",", "we", "investigate", "the", "performance", "of", "modern", "hyperparameter", "optimization", "methods", "(", "hpo", ")", "on", "fine", "-", "tuning", "pre", "-", "trained", "language", "models", ".", "first", ",", "we", "study", "and", "report", "three", "hpo", "algorithms", "\u2019", "performances", "on", "fine", "-", "tuning", "two", "state", "-", "of", "-", "the", "-", "art", "language", "models", "on", "the", "glue", "dataset", ".", "we", "find", "that", "using", "the", "same", "time", "budget", ",", "hpo", "often", "fails", "to", "outperform", "grid", "search", "due", "to", "two", "reasons", ":", "insufficient", "time", "budget", "and", 
"overfitting", ".", "we", "propose", "two", "general", "strategies", "and", "an", "experimental", "procedure", "to", "systematically", "troubleshoot", "hpo", "\u2019", "s", "failure", "cases", ".", "by", "applying", "the", "procedure", ",", "we", "observe", "that", "hpo", "can", "succeed", "with", "more", "appropriate", "settings", "in", "the", "search", "space", "and", "time", "budget", ";", "however", ",", "in", "certain", "cases", "overfitting", "remains", ".", "finally", ",", "we", "make", "suggestions", "for", "future", "work", ".", "our", "implementation", "can", "be", "found", "in", "https", ":", "/", "/", "github", ".", "com", "/", "microsoft", "/", "flaml", "/", "tree", "/", "main", "/", "flaml", "/", "nlp", "/"]}, {"venue": "ACL", "title": "Domain-Adaptive Pretraining Methods for Dialogue Understanding", "abstract": "Language models like BERT and SpanBERT pretrained on open-domain data have obtained impressive gains on various NLP tasks. In this paper, we probe the effectiveness of domain-adaptive pretraining objectives on downstream tasks. In particular, three objectives, including a novel objective focusing on modeling predicate-argument relations, are evaluated on two challenging dialogue understanding tasks. 
Experimental results demonstrate that domain-adaptive pretraining with proper objectives can significantly improve the performance of a strong baseline on these tasks, achieving the new state-of-the-art performances.", "doc_id": "c66e573c68fc2fc3a4d9904717bd5e55", "publication_year": 2021, "sentences": ["language models like bert and spanbert pretrained on open - domain data have obtained impressive gains on various nlp tasks .", "in this paper , we probe the effectiveness of domain - adaptive pretraining objectives on downstream tasks .", "in particular , three objectives , including a novel objective focusing on modeling predicate - argument relations , are evaluated on two challenging dialogue understanding tasks .", "experimental results demonstrate that domain - adaptive pretraining with proper objectives can significantly improve the performance of a strong baseline on these tasks , achieving the new state - of - the - art performances ."], "events": [{"event_type": "ITT", "arguments": [{"text": "language models", "nugget_type": "APP", "argument_type": "Target", "tokens": ["language", "models"], "offsets": [0, 1]}, {"text": "on various nlp tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": ["on", "various", "nlp", "tasks"], "offsets": [16, 17, 18, 19]}], "trigger": {"text": "obtained", "tokens": ["obtained"], "offsets": [13]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": "Researcher", "tokens": ["we"], "offsets": [25]}, {"text": "effectiveness of domain - adaptive pretraining objectives", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["effectiveness", "of", "domain", "-", "adaptive", "pretraining", "objectives"], "offsets": [28, 29, 30, 31, 32, 33, 34]}], "trigger": {"text": "probe", "tokens": ["probe"], "offsets": [26]}}, {"event_type": "WKS", "arguments": [{"text": "on two challenging dialogue understanding tasks", "nugget_type": "LIM", "argument_type": "Condition", "tokens": 
["on", "two", "challenging", "dialogue", "understanding", "tasks"], "offsets": [59, 60, 61, 62, 63, 64]}, {"text": "three objectives", "nugget_type": "TAK", "argument_type": "Content", "tokens": ["three", "objectives"], "offsets": [42, 43]}], "trigger": {"text": "evaluated", "tokens": ["evaluated"], "offsets": [58]}}, {"event_type": "FAC", "arguments": [{"text": "performance of a strong baseline on these tasks", "nugget_type": "TAK", "argument_type": "Object", "tokens": ["performance", "of", "a", "strong", "baseline", "on", "these", "tasks"], "offsets": [81, 82, 83, 84, 85, 86, 87, 88]}, {"text": "domain - adaptive pretraining", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["domain", "-", "adaptive", "pretraining"], "offsets": [70, 71, 72, 73]}], "trigger": {"text": "significantly improve", "tokens": ["significantly", "improve"], "offsets": [78, 79]}}, {"event_type": "FAC", "arguments": [{"text": "domain - adaptive pretraining", "nugget_type": "APP", "argument_type": "Subject", "tokens": ["domain", "-", "adaptive", "pretraining"], "offsets": [70, 71, 72, 73]}, {"text": "new state - of - the - art performances", "nugget_type": "STR", "argument_type": "Object", "tokens": ["new", "state", "-", "of", "-", "the", "-", "art", "performances"], "offsets": [92, 93, 94, 95, 96, 97, 98, 99, 100]}], "trigger": {"text": "achieving", "tokens": ["achieving"], "offsets": [90]}}], "document": ["language", "models", "like", "bert", "and", "spanbert", "pretrained", "on", "open", "-", "domain", "data", "have", "obtained", "impressive", "gains", "on", "various", "nlp", "tasks", ".", "in", "this", "paper", ",", "we", "probe", "the", "effectiveness", "of", "domain", "-", "adaptive", "pretraining", "objectives", "on", "downstream", "tasks", ".", "in", "particular", ",", "three", "objectives", ",", "including", "a", "novel", "objective", "focusing", "on", "modeling", "predicate", "-", "argument", "relations", ",", "are", "evaluated", "on", "two", "challenging", "dialogue", 
"understanding", "tasks", ".", "experimental", "results", "demonstrate", "that", "domain", "-", "adaptive", "pretraining", "with", "proper", "objectives", "can", "significantly", "improve", "the", "performance", "of", "a", "strong", "baseline", "on", "these", "tasks", ",", "achieving", "the", "new", "state", "-", "of", "-", "the", "-", "art", "performances", "."]}, {"venue": "ACL", "title": "Context Matters: A Pragmatic Study of PLMs\u2019 Negation Understanding", "abstract": "In linguistics, there are two main perspectives on negation: a semantic and a pragmatic view. So far, research in NLP on negation has almost exclusively adhered to the semantic view. In this article, we adopt the pragmatic paradigm to conduct a study of negation understanding focusing on transformer-based PLMs. Our results differ from previous, semantics-based studies and therefore help to contribute a more comprehensive \u2013 and, given the results, much more optimistic \u2013 picture of the PLMs\u2019 negation understanding.", "doc_id": "8db50461c3bbe398b8fa02879a6a3b34", "publication_year": 2022, "sentences": ["in linguistics , there are two main perspectives on negation : a semantic and a pragmatic view .", "so far , research in nlp on negation has almost exclusively adhered to the semantic view .", "in this article , we adopt the pragmatic paradigm to conduct a study of negation understanding focusing on transformer - based plms .", "our results differ from previous , semantics - based studies and therefore help to contribute a more comprehensive \u2013 and , given the results , much more optimistic \u2013 picture of the plms \u2019 negation understanding ."], "events": [{"event_type": "ITT", "arguments": [{"text": "negation", "nugget_type": "TAK", "argument_type": "Target", "tokens": ["negation"], "offsets": [9]}], "trigger": {"text": "perspectives", "tokens": ["perspectives"], "offsets": [7]}}, {"event_type": "WKS", "arguments": [{"text": "we", "nugget_type": "OG", "argument_type": 
"Researcher", "tokens": ["we"], "offsets": [39]}, {"text": "pragmatic paradigm", "nugget_type": "APP", "argument_type": "Content", "tokens": ["pragmatic", "paradigm"], "offsets": [42, 43]}, {"text": "conduct", "nugget_type": "E-PUR", "argument_type": "Target", "tokens": ["conduct"], "offsets": [45]}], "trigger": {"text": "adopt", "tokens": ["adopt"], "offsets": [40]}}, {"event_type": "PUR", "arguments": [{"text": "study of negation understanding focusing on transformer - based plms", "nugget_type": "TAK", "argument_type": "Aim", "tokens": ["study", "of", "negation", "understanding", "focusing", "on", "transformer", "-", "based", "plms"], "offsets": [47, 48, 49, 50, 51, 52, 53, 54, 55, 56]}], "trigger": {"text": "conduct", "tokens": ["conduct"], "offsets": [45]}}, {"event_type": "CMP", "arguments": [{"text": "pragmatic paradigm", "nugget_type": "APP", "argument_type": "Arg1", "tokens": ["pragmatic", "paradigm"], "offsets": [42, 43]}, {"text": "previous , semantics - based studies", "nugget_type": "APP", "argument_type": "Arg2", "tokens": ["previous", ",", "semantics", "-", "based", "studies"], "offsets": [62, 63, 64, 65, 66, 67]}], "trigger": {"text": "differ", "tokens": ["differ"], "offsets": [60]}}, {"event_type": "RWF", "arguments": [{"text": "research", "nugget_type": "APP", "argument_type": "Concern", "tokens": ["research"], "offsets": [21]}, {"text": "exclusively adhered", "nugget_type": "WEA", "argument_type": "Fault", "tokens": ["exclusively", "adhered"], "offsets": [28, 29]}], "trigger": {"text": "exclusively adhered", "tokens": ["exclusively", "adhered"], "offsets": [28, 29]}}], "document": ["in", "linguistics", ",", "there", "are", "two", "main", "perspectives", "on", "negation", ":", "a", "semantic", "and", "a", "pragmatic", "view", ".", "so", "far", ",", "research", "in", "nlp", "on", "negation", "has", "almost", "exclusively", "adhered", "to", "the", "semantic", "view", ".", "in", "this", "article", ",", "we", "adopt", "the", "pragmatic", "paradigm", 
"to", "conduct", "a", "study", "of", "negation", "understanding", "focusing", "on", "transformer", "-", "based", "plms", ".", "our", "results", "differ", "from", "previous", ",", "semantics", "-", "based", "studies", "and", "therefore", "help", "to", "contribute", "a", "more", "comprehensive", "\u2013", "and", ",", "given", "the", "results", ",", "much", "more", "optimistic", "\u2013", "picture", "of", "the", "plms", "\u2019", "negation", "understanding", "."]}]